[llvm] 0b79979 - [RISCV] Merge some rvv intrinsic test cases that only differ by XLen type.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 23 09:41:59 PST 2022


Author: Craig Topper
Date: 2022-01-23T09:36:14-08:00
New Revision: 0b799791807e6b23a568526484f6cdaf0984cf02

URL: https://github.com/llvm/llvm-project/commit/0b799791807e6b23a568526484f6cdaf0984cf02
DIFF: https://github.com/llvm/llvm-project/commit/0b799791807e6b23a568526484f6cdaf0984cf02.diff

LOG: [RISCV] Merge some rvv intrinsic test cases that only differ by XLen type.

Instead of having separate tests for i32 XLen and i64 XLen, use sed to
replace iXLen with i32/i64 before running llc.

This change covers all of the floating point tests.
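
For example, the merged vfadd.ll (shown in full in the diff below) keeps a
single copy of each test with an iXLen placeholder and lets sed specialize
it per target:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

iXLen is not a real IR type, so sed rewrites it to the target's XLen integer
type before llc ever parses the file. Both RUN lines share the same CHECK
prefix, which works because the expected vector codegen is identical for
riscv32 and riscv64.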

Added: 
    llvm/test/CodeGen/RISCV/rvv/vfadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfclass.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax.ll
    llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
    llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
    llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
deleted file mode 100644
index b91d86befbc87..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ /dev/null
@@ -1,1356 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfadd.ll
index 5df1881bffb23..041580b2b49d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
deleted file mode 100644
index c86e1f334712a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 1 x i16> %0,
-  <vscale x 1 x half> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 2 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 2 x i16> %0,
-  <vscale x 2 x half> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 4 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 4 x i16> %0,
-  <vscale x 4 x half> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 8 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 8 x i16> %0,
-  <vscale x 8 x half> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 16 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 16 x i16> %0,
-  <vscale x 16 x half> %1,
-  <vscale x 16 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 32 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 32 x i16> %0,
-  <vscale x 32 x half> %1,
-  <vscale x 32 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 1 x float> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 1 x i32> %0,
-  <vscale x 1 x float> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 2 x float> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 2 x i32> %0,
-  <vscale x 2 x float> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 4 x float> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 4 x i32> %0,
-  <vscale x 4 x float> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 8 x float> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 8 x i32> %0,
-  <vscale x 8 x float> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 16 x float> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 16 x i32> %0,
-  <vscale x 16 x float> %1,
-  <vscale x 16 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 1 x double> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 1 x i64> %0,
-  <vscale x 1 x double> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 2 x double> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 2 x i64> %0,
-  <vscale x 2 x double> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 4 x double> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 4 x i64> %0,
-  <vscale x 4 x double> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 8 x i64> %0,
-  <vscale x 8 x double> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll
similarity index 91%
rename from llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfclass.ll
index ae9df2aefa4d9..e6aa398310956 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll
@@ -1,9 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
@@ -12,11 +14,11 @@ define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -25,7 +27,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
@@ -36,20 +38,20 @@ define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
@@ -58,11 +60,11 @@ define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -71,7 +73,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
@@ -82,20 +84,20 @@ define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
   <vscale x 2 x i16> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
@@ -104,11 +106,11 @@ define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -117,7 +119,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
@@ -128,20 +130,20 @@ define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
   <vscale x 4 x i16> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
@@ -150,11 +152,11 @@ define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -163,7 +165,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
@@ -174,20 +176,20 @@ define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
   <vscale x 8 x i16> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
@@ -196,11 +198,11 @@ define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -209,7 +211,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
@@ -220,20 +222,20 @@ define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
   <vscale x 16 x i16> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
@@ -242,11 +244,11 @@ define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -255,7 +257,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
@@ -266,20 +268,20 @@ define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
   <vscale x 32 x i16> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
@@ -288,11 +290,11 @@ define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -301,7 +303,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
@@ -312,20 +314,20 @@ define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
   <vscale x 1 x i32> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
@@ -334,11 +336,11 @@ define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -347,7 +349,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
@@ -358,20 +360,20 @@ define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
   <vscale x 2 x i32> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
@@ -380,11 +382,11 @@ define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -393,7 +395,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
@@ -404,20 +406,20 @@ define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
   <vscale x 4 x i32> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
@@ -426,11 +428,11 @@ define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -439,7 +441,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
@@ -450,20 +452,20 @@ define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
   <vscale x 8 x i32> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
 define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
@@ -472,11 +474,11 @@ define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -485,7 +487,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
@@ -496,20 +498,20 @@ define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
   <vscale x 16 x i32> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
@@ -518,11 +520,11 @@ define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -531,7 +533,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
@@ -542,20 +544,20 @@ define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
   <vscale x 1 x i64> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
@@ -564,11 +566,11 @@ define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -577,7 +579,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
@@ -588,20 +590,20 @@ define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
   <vscale x 2 x i64> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
@@ -610,11 +612,11 @@ define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -623,7 +625,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
@@ -634,20 +636,20 @@ define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
   <vscale x 4 x i64> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
@@ -656,11 +658,11 @@ define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -669,7 +671,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
@@ -680,13 +682,13 @@ define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }

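As a rough sketch of what lit ends up executing for the two RUN lines in the merged vfclass.ll above (the path is illustrative; lit substitutes %s with the actual test file):

  sed 's/iXLen/i32/g' vfclass.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=ilp32d | FileCheck vfclass.ll
  sed 's/iXLen/i64/g' vfclass.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=lp64d | FileCheck vfclass.ll

Both pipelines are checked against the same CHECK lines, so the merge relies on the generated assembly being identical for the i32 and i64 XLen variants.
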
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
deleted file mode 100644
index 65270dc06336d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

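The rv64-only file above can go away because the merged vfcvt-f-x.ll below reaches riscv64 through its second sed RUN line; after that substitution, the i64 form of the deleted file is reconstructed textually. For example, the first declaration becomes (illustrative sed output, not part of the checked-in file):

  declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
    <vscale x 1 x i16>,
    i64);
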
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
index 5549960bb7736..e8d6257f1e652 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 1
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 3
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
deleted file mode 100644
index 6fc87d15dac18..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll
index 1c8c2a80c90db..82ec8ca7cc74d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

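The hunks above show the pattern used throughout this patch: every i32
(on rv32) or i64 (on rv64) VL/policy operand becomes the placeholder
type iXLen. iXLen is not a valid IR type on its own, so the checked-in
file is only well-formed IR after one of the sed RUN lines rewrites it.
A minimal sketch of the two forms a single declaration takes (the
declaration is copied from the hunks above):

; As checked in, with the XLen-agnostic placeholder:
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
  <vscale x 1 x i32>,
  iXLen);

; After sed 's/iXLen/i32/g' (the riscv32 RUN line), llc sees:
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
  <vscale x 1 x i32>,
  i32);
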
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
deleted file mode 100644
index 75c0a7ff62a47..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
-    <vscale x 1 x half> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
-    <vscale x 2 x half> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
-    <vscale x 4 x half> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
-    <vscale x 8 x half> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
-    <vscale x 16 x half> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
-  <vscale x 32 x half>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
-    <vscale x 32 x half> %0,
-    i32 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
-    <vscale x 1 x float> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
-    <vscale x 2 x float> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
-    <vscale x 4 x float> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
-    <vscale x 8 x float> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
-    <vscale x 16 x float> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
-    <vscale x 1 x double> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
-    <vscale x 2 x double> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
-    <vscale x 4 x double> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
-    <vscale x 8 x double> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
index 9c3e2a03f1141..0dbc0f221e7e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
   <vscale x 8 x i16>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
   <vscale x 16 x i16>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
   <vscale x 32 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
   <vscale x 32 x i16>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
   <vscale x 1 x i32>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
   <vscale x 2 x i32>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
   <vscale x 4 x i32>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
   <vscale x 8 x i32>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
   <vscale x 16 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
   <vscale x 16 x i32>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
   <vscale x 1 x i64>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
   <vscale x 2 x i64>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
   <vscale x 4 x i64>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
   <vscale x 8 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
   <vscale x 8 x i64>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
deleted file mode 100644
index 3a309eea35dbd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

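The rename that follows shows the new pattern in full: iXLen is a
placeholder rather than a real LLVM IR type, so each RUN line pipes the
test through sed to substitute the correct XLen type before llc ever
parses it. As a minimal sketch (using the file name from this commit),
the riscv64 RUN line effectively compiles the output of:

  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll

which turns a parameterized declaration such as

  declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x half>,
    iXLen);

back into the i64 form spelled out by the deleted rv64-only test above,
while the riscv32 RUN line likewise recovers the i32 form of the old
rv32-only test. Both expansions are checked against the same CHECK
lines, since the generated vector code does not depend on XLen.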
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll
index 966a5d6f85a0f..457a93587ec2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
   <vscale x 8 x i16>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
   <vscale x 16 x i16>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
   <vscale x 32 x i16>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
   <vscale x 1 x i32>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
   <vscale x 2 x i32>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
   <vscale x 4 x i32>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
   <vscale x 8 x i32>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
   <vscale x 16 x i32>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
   <vscale x 1 x i64>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
   <vscale x 2 x i64>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
   <vscale x 4 x i64>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
   <vscale x 8 x i64>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
deleted file mode 100644
index f5984a512e0fe..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
index 26632717dfa9f..88205bb75ce3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x h
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x h
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x h
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x h
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
   <vscale x 8 x i16>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
   <vscale x 16 x i16>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
   <vscale x 32 x i16>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x f
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
   <vscale x 1 x i32>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x f
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
   <vscale x 2 x i32>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x f
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
   <vscale x 4 x i32>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x f
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
   <vscale x 8 x i32>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
   <vscale x 16 x i32>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x d
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
   <vscale x 1 x i64>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x d
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
   <vscale x 2 x i64>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x d
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
   <vscale x 4 x i64>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x d
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
   <vscale x 8 x i64>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
deleted file mode 100644
index 8fb44d2d1ecf0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
index e76b0db05446e..fb8a4797f0ae8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
   <vscale x 8 x i16>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 1
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
   <vscale x 16 x i16>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 3
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
   <vscale x 32 x i16>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
   <vscale x 1 x i32>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
   <vscale x 2 x i32>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
   <vscale x 4 x i32>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
   <vscale x 8 x i32>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 1
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
   <vscale x 16 x i32>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
   <vscale x 1 x i64>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
   <vscale x 2 x i64>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
   <vscale x 4 x i64>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
   <vscale x 8 x i64>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

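The new RUN lines in vfcvt-xu-f.ll above are the pattern the merged files follow: the IR spells the XLen-dependent scalar type as the placeholder iXLen, and sed instantiates it per target before llc ever parses the file. As a minimal sketch of what lit executes for those RUN lines (assuming the renamed vfcvt-xu-f.ll and the LLVM tools on PATH; the flags are copied from the diff above, everything else is illustrative):

  # RV32: rewrite the placeholder to i32, compile, and match the shared CHECKs.
  # FileCheck reads its patterns from the original file, which is safe because
  # no CHECK line mentions iXLen.
  sed 's/iXLen/i32/g' vfcvt-xu-f.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfcvt-xu-f.ll

  # RV64: the same source, with iXLen rewritten to i64 instead.
  sed 's/iXLen/i64/g' vfcvt-xu-f.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfcvt-xu-f.ll

Because the generated vector code is identical for both targets, one set of autogenerated CHECK lines serves both pipelines, which is what lets the separate -rv32/-rv64 copies of each test collapse into a single file, as in the deletion that follows.
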
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
deleted file mode 100644
index 2d4a16e1bf4e8..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
index 01bfb50ed9b49..0145f2ad764ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
deleted file mode 100644
index 16d305bad846c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
index c5809888ff17b..5115a7548e2ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
deleted file mode 100644
index afd41bd8a2122..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
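
(As a reading aid for the CHECK lines in these tests, in RVV 1.0 notation: in an
instruction like "vsetvli zero, a0, e16, m2, tu, mu", the zero destination
discards the returned vl, a0 supplies the requested AVL (the trailing
iXLen/i32/i64 operand of each intrinsic call), e16 selects 16-bit elements,
m2 selects a register-group multiplier of LMUL=2, and tu/mu select the
tail-undisturbed and mask-undisturbed policies, which is why these forms can
merge results into the destination register.)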

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
index cfb32cfab4cdc..9313e440e500f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
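
For reference, once lit substitutes %s with the on-disk path, the
parameterized RUN lines at the top of the merged vfmadd.ll expand into two
concrete pipelines; a sketch, assuming the file path shown in the diff header:

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfmadd.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfmadd.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfmadd.ll

Both instantiations are verified against the same CHECK lines, which is what
makes the merge possible: none of the expected assembly depends on XLen, only
the IR operand type does.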

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
deleted file mode 100644
index 98b4cf71da14d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmax.ll
index 4fc7319fb2b29..446981928b6cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
deleted file mode 100644
index 3dc1240d0f7b6..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
+++ /dev/null
@@ -1,902 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half zeroinitializer,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half zeroinitializer,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half zeroinitializer,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half zeroinitializer,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half zeroinitializer,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half zeroinitializer,
-    <vscale x 32 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float zeroinitializer,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float zeroinitializer,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float zeroinitializer,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float zeroinitializer,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float zeroinitializer,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double zeroinitializer,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double zeroinitializer,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double zeroinitializer,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double zeroinitializer,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}

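A note on the vzm tests above: they pass a floating-point zeroinitializer as the scalar operand, and because +0.0 is an all-zeros bit pattern for f16/f32/f64, the CHECK lines show the backend selecting the integer-immediate form vmerge.vim with immediate 0 rather than vfmerge.vfm, presumably to avoid materializing 0.0 in an FP register. A minimal instance of the pattern, written here with the iXLen placeholder used by the merged test:

  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)
  ; selected as: vmerge.vim v8, v8, 0, v0
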
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
index b23d908c7edda..eb3efd1fa0373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -27,9 +29,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -40,7 +42,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -62,7 +64,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,9 +73,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -84,7 +86,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -93,9 +95,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -115,9 +117,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -128,7 +130,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -137,9 +139,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -150,7 +152,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -159,9 +161,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -172,7 +174,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -181,9 +183,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -194,7 +196,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -203,9 +205,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -216,7 +218,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -225,9 +227,9 @@ declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -238,7 +240,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x half> %a
 }
@@ -247,9 +249,9 @@ declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 32 x half> %0,
     half %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x half> %a
 }
@@ -269,9 +271,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -282,7 +284,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -291,9 +293,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -304,7 +306,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -313,9 +315,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -326,7 +328,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -357,9 +359,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -370,7 +372,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -379,9 +381,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -392,7 +394,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -401,9 +403,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -414,7 +416,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -423,9 +425,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -436,7 +438,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -445,9 +447,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -458,7 +460,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -467,9 +469,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -480,7 +482,7 @@ entry:
     <vscale x 16 x float> %0,
     float %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -489,9 +491,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -546,7 +548,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -555,9 +557,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -568,7 +570,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -577,9 +579,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -590,7 +592,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -599,9 +601,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -612,7 +614,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -621,9 +623,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -634,7 +636,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -643,9 +645,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -656,12 +658,12 @@ entry:
     <vscale x 8 x double> %0,
     double %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -672,12 +674,12 @@ entry:
     <vscale x 1 x half> %0,
     half zeroinitializer,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -688,12 +690,12 @@ entry:
     <vscale x 2 x half> %0,
     half zeroinitializer,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -704,12 +706,12 @@ entry:
     <vscale x 4 x half> %0,
     half zeroinitializer,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -720,12 +722,12 @@ entry:
     <vscale x 8 x half> %0,
     half zeroinitializer,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -736,12 +738,12 @@ entry:
     <vscale x 16 x half> %0,
     half zeroinitializer,
     <vscale x 16 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -752,12 +754,12 @@ entry:
     <vscale x 32 x half> %0,
     half zeroinitializer,
     <vscale x 32 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -768,12 +770,12 @@ entry:
     <vscale x 1 x float> %0,
     float zeroinitializer,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -784,12 +786,12 @@ entry:
     <vscale x 2 x float> %0,
     float zeroinitializer,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -800,12 +802,12 @@ entry:
     <vscale x 4 x float> %0,
     float zeroinitializer,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -816,12 +818,12 @@ entry:
     <vscale x 8 x float> %0,
     float zeroinitializer,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -832,12 +834,12 @@ entry:
     <vscale x 16 x float> %0,
     float zeroinitializer,
     <vscale x 16 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -848,12 +850,12 @@ entry:
     <vscale x 1 x double> %0,
     double zeroinitializer,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -864,12 +866,12 @@ entry:
     <vscale x 2 x double> %0,
     double zeroinitializer,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -880,12 +882,12 @@ entry:
     <vscale x 4 x double> %0,
     double zeroinitializer,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -896,7 +898,7 @@ entry:
     <vscale x 8 x double> %0,
     double zeroinitializer,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }

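The rename above shows the merged scheme in full: the test body is written once against the iXLen placeholder, and each RUN line rewrites the placeholder to the target's native XLen type before llc ever parses the file. With lit's %s expanded to the test path, the riscv32 run is effectively:

  sed 's/iXLen/i32/g' vfmerge.ll | \
    llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d | \
    FileCheck vfmerge.ll

so a declaration such as

  declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half>,
    half,
    <vscale x 1 x i1>,
    iXLen);

reaches llc with i32 on riscv32 and i64 on riscv64, and a single set of CHECK lines covers both targets.
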
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
deleted file mode 100644
index 0861a787440e4..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmin.ll
index e647fe51ffb17..e151e9fb695de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
deleted file mode 100644
index 2a5fb2896aa56..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
index c8407dfe64730..cf9df7550fcc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
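
As a sketch of what the merged RUN lines above do: lit substitutes %s with the
test file's own path, so each RUN amounts to a pipeline like the following
(the explicit path is illustrative):

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfmsac.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfmsac.ll

The second RUN line repeats this with sed 's/iXLen/i64/g', -mtriple=riscv64,
and -target-abi=lp64d, so one copy of the test exercises both XLen widths
against a single set of CHECK lines.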

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
deleted file mode 100644
index 70efc0da21f5a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
index 620c3dcb1025a..d071893ceb085 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
deleted file mode 100644
index 08aa64b6de7fc..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmul.ll
index 50ebccd92e64c..0f4c738025617 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
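
For reference, every merged file follows the same XLen-agnostic pattern: the test is written once against the placeholder type iXLen, and the RUN lines rewrite it with sed before handing it to llc, so one file serves both targets. A minimal hand-written sketch of the pattern (the function name and the abbreviated CHECK lines below are illustrative, not taken from the commit; the autogenerated tests carry full CHECK-NEXT bodies):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @example(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %vl) nounwind {
; CHECK-LABEL: example:
; CHECK: vfmul.vv v8, v8, v9
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %vl)
  ret <vscale x 1 x half> %a
}

Because both RUN lines feed the same CHECK prefix, this merge only works for tests whose generated code is identical on riscv32 and riscv64, which holds for these floating-point intrinsic tests.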

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
deleted file mode 100644
index ee619309b9e3c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
+++ /dev/null
@@ -1,197 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
-
-declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i64)
-
-define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2)
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i64)
-
-define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2)
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i64)
-
-define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2)
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i64)
-
-define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2)
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i64)
-
-define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2)
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i64)
-
-define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2)
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i64)
-
-define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2)
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)
-
-define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2)
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i64)
-
-define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2)
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i64)
-
-define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2)
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i64)
-
-define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2)
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i64)
-
-define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2)
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i64)
-
-define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2)
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i64)
-
-define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2)
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i64)
-
-define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2)
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
similarity index 74%
rename from llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
index 4d47c000788fb..8464dc2f62997 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
@@ -1,197 +1,200 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
-declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i32)
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)
 
-define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, iXLen %2)
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i32)
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, iXLen)
 
-define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, iXLen %2)
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i32)
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, iXLen)
 
-define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, iXLen %2)
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i32)
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, iXLen)
 
-define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, iXLen %2)
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i32)
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, iXLen)
 
-define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, iXLen %2)
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i32)
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, iXLen)
 
-define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2)
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, iXLen %2)
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i32)
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, iXLen)
 
-define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2)
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2)
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i32)
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, iXLen)
 
-define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2)
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2)
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i32)
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, iXLen)
 
-define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2)
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2)
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i32)
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, iXLen)
 
-define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2)
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2)
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i32)
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, iXLen)
 
-define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2)
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2)
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i32)
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, iXLen)
 
-define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2)
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, iXLen %2)
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i32)
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, iXLen)
 
-define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2)
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, iXLen %2)
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i32)
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, iXLen)
 
-define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2)
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, iXLen %2)
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i32)
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, iXLen)
 
-define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2)
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, iXLen %2)
   ret <vscale x 8 x double> %a
 }

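For reference, the merged test above is parameterized over XLen purely by textual substitution; llc never parses the iXLen placeholder. A rough sketch of what the two RUN lines expand to under lit (file name and working directory assumed here for illustration):

  sed 's/iXLen/i32/g' vfmv.s.f.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
      -verify-machineinstrs -target-abi=ilp32d | FileCheck vfmv.s.f.ll
  sed 's/iXLen/i64/g' vfmv.s.f.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
      -verify-machineinstrs -target-abi=lp64d | FileCheck vfmv.s.f.ll

Because the CHECK lines never mention iXLen and the generated assembly is identical on riscv32 and riscv64, a single CHECK prefix covers both runs.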
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
deleted file mode 100644
index 1c4d9f6e89dc4..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ /dev/null
@@ -1,482 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -target-abi lp64d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
-    half %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
-    float %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
-    float %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
-    float %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
-    float %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
-    float %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
-    double %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
-    double %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
-    double %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
-    double %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
-    half 0.0,
-    i64 %0)
-
-  ret <vscale x 32 x half> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
-    float 0.0,
-    i64 %0)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
-    float 0.0,
-    i64 %0)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
-    float 0.0,
-    i64 %0)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
-    float 0.0,
-    i64 %0)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
-    float 0.0,
-    i64 %0)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
-    double 0.0,
-    i64 %0)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
-    double 0.0,
-    i64 %0)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
-    double 0.0,
-    i64 %0)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
-    double 0.0,
-    i64 %0)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll
index b4acc57dcd81e..6e0613e3e49ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -target-abi ilp32d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,16 +16,16 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwi
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -32,16 +34,16 @@ define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwi
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -50,16 +52,16 @@ define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwi
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -68,16 +70,16 @@ define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwi
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -86,16 +88,16 @@ define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) noun
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -104,16 +106,16 @@ define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) noun
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -122,16 +124,16 @@ define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) noun
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -140,16 +142,16 @@ define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) noun
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -158,16 +160,16 @@ define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) noun
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -176,16 +178,16 @@ define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) noun
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -194,16 +196,16 @@ define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) no
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -212,16 +214,16 @@ define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) no
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -230,16 +232,16 @@ define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) no
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -248,16 +250,16 @@ define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) no
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -266,12 +268,12 @@ define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) no
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -280,12 +282,12 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -294,12 +296,12 @@ define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -308,12 +310,12 @@ define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -322,12 +324,12 @@ define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -336,12 +338,12 @@ define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -350,12 +352,12 @@ define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind {
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -364,12 +366,12 @@ define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind {
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -378,12 +380,12 @@ define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -392,12 +394,12 @@ define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,12 +408,12 @@ define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -420,12 +422,12 @@ define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -434,12 +436,12 @@ define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind {
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -448,12 +450,12 @@ define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind {
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -462,12 +464,12 @@ define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind {
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -476,7 +478,7 @@ define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x double> %a
 }

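At the IR level the substitution touches only the scalar VL operand type. Taking the first declaration in the merged vfmv.v.f.ll as a worked example, the riscv64 run rewrites

  declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    half,
    iXLen);

into

  declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    half,
    i64);

before llc parses it, which is exactly what the deleted vfmv.v.f-rv64.ll spelled out by hand. The *_zero tests merge just as cleanly: splatting 0.0 lowers to 'vmv.v.i v8, 0' regardless of XLen, so their CHECK lines need no per-target split either.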
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
deleted file mode 100644
index 2e35f75eb89c0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
-  <vscale x 1 x half>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
-  <vscale x 4 x half>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
-  <vscale x 8 x half>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
-  <vscale x 16 x half>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
-  <vscale x 2 x float>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-  <vscale x 4 x float>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-  <vscale x 8 x float>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 014ff81bada19..e757261e7363d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
   <vscale x 1 x half>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
   <vscale x 4 x half>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
   <vscale x 8 x half>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
   <vscale x 16 x half>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
   <vscale x 2 x float>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
   <vscale x 4 x float>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
   <vscale x 8 x float>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
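
The two new RUN lines above are the whole mechanism behind the merge: lit expands %s to the
test file's path, sed rewrites every iXLen placeholder to a concrete integer type for the
target being tested, and the substituted IR is piped straight into llc. Run outside lit, the
riscv32 leg of the pipeline looks roughly like the following sketch (the literal file name
here is illustrative, not part of the patch):

  sed 's/iXLen/i32/g' vfncvt-f-f.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfncvt-f-f.ll

Note that FileCheck still reads its CHECK lines from the unsubstituted file, which works
because the CHECK lines never mention iXLen; and since the riscv32 and riscv64 runs produce
the same vfncvt assembly for these tests, a single CHECK prefix covers both.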

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
deleted file mode 100644
index d8d55df8e4589..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
-    <vscale x 16 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
-    <vscale x 2 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
-    <vscale x 4 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
-    <vscale x 8 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
index ab5f66bf692fa..eedc7c1633999 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
deleted file mode 100644
index 32a23932b074b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
-    <vscale x 16 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
-    <vscale x 2 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
-    <vscale x 4 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
-    <vscale x 8 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
index 4835d4e5c5916..e6842b749492c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
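
The updated RUN lines above carry the whole merging scheme: the test body is
written against the placeholder type iXLen, and sed rewrites it to i32 or i64
before llc ever parses the file, so one body serves both triples with a single
set of CHECK lines. A minimal sketch of the pattern, reusing one intrinsic from
this file (the function name is illustrative, not taken from the commit):

  ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
  ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
  ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
  ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
  declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
    <vscale x 1 x i32>,
    iXLen);

  define <vscale x 1 x half> @iXLen_example(<vscale x 1 x i32> %v, iXLen %vl) nounwind {
  ; CHECK-LABEL: iXLen_example:
  ; CHECK: vfncvt.f.xu.w
  entry:
    ; The VL operand is the only place XLen appears in the IR, so substituting
    ; the scalar type retargets the whole test.
    %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
      <vscale x 1 x i32> %v,
      iXLen %vl)
    ret <vscale x 1 x half> %a
  }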

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
deleted file mode 100644
index 4020c1d5d1a34..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
-  <vscale x 1 x half>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
-  <vscale x 4 x half>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
-  <vscale x 8 x half>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
-  <vscale x 16 x half>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
-  <vscale x 2 x float>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
-  <vscale x 4 x float>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
-  <vscale x 8 x float>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
index b464fdde6db25..2a7c30939f108 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
   <vscale x 1 x half>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32(<vscale x 2 x half> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
   <vscale x 4 x half>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32(<vscale x 4 x half> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
   <vscale x 8 x half>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32(<vscale x 8 x half> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscal
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32
   <vscale x 16 x half>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32(<vscale x 16 x half> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64(<vscale x 1 x float> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
   <vscale x 2 x float>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
   <vscale x 4 x float>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
   <vscale x 8 x float>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
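
Beyond the sed pipe, the merged RUN lines drop the explicit +d attribute (which
+v appears to imply here) and pin the ABI with -target-abi=ilp32d /
-target-abi=lp64d, presumably so scalar FP values follow the hard-float calling
convention on both targets and the shared CHECK lines stay valid. A single
configuration can also be reproduced by hand by expanding %s in the RUN line; a
sketch for the riscv64 leg of the file renamed above:

  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \
      -target-abi=lp64d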

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
deleted file mode 100644
index ad695704aae0a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll
index 227210e6f2f01..9a14df186dd51 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
   <vscale x 1 x i8>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
   <vscale x 2 x i8>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
   <vscale x 4 x i8>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
   <vscale x 8 x i8>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
   <vscale x 16 x i8>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
   <vscale x 32 x i8>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
   <vscale x 1 x i16>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
   <vscale x 2 x i16>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
   <vscale x 4 x i16>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
   <vscale x 8 x i16>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
   <vscale x 16 x i16>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
   <vscale x 1 x i32>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
   <vscale x 2 x i32>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
   <vscale x 4 x i32>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
   <vscale x 8 x i32>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
deleted file mode 100644
index f7f873dd05155..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll
index 4bfe331db7e00..24d75d6d09b19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
   <vscale x 1 x i8>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
   <vscale x 2 x i8>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
   <vscale x 4 x i8>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
   <vscale x 8 x i8>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
   <vscale x 16 x i8>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
   <vscale x 32 x i8>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
   <vscale x 1 x i16>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
   <vscale x 2 x i16>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
   <vscale x 4 x i16>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
   <vscale x 8 x i16>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscal
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32
   <vscale x 16 x i16>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
   <vscale x 1 x i32>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
   <vscale x 2 x i32>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
   <vscale x 4 x i32>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
   <vscale x 8 x i32>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
deleted file mode 100644
index d78d695f3df20..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll
index 6c97455f1992e..0b8f9c62e50ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x ha
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
   <vscale x 1 x i8>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x ha
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
   <vscale x 2 x i8>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x ha
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
   <vscale x 4 x i8>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x ha
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
   <vscale x 8 x i8>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
   <vscale x 16 x i8>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
   <vscale x 32 x i8>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
   <vscale x 1 x i16>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
   <vscale x 2 x i16>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
   <vscale x 4 x i16>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
   <vscale x 8 x i16>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 1
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
   <vscale x 16 x i16>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
   <vscale x 1 x i32>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
   <vscale x 2 x i32>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
   <vscale x 4 x i32>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
   <vscale x 8 x i32>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
deleted file mode 100644
index c7bb913f3797f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}

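The rename below merges the rv32-only test into a shared vfncvt-xu-f.ll
whose RUN lines drive both riscv32 and riscv64 from one source. As a
minimal sketch of what lit ends up executing, assuming its usual
substitution of %s with the test's own path, each RUN pair amounts to:

  sed 's/iXLen/i32/g' vfncvt-xu-f.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \
          -target-abi=ilp32d \
    | FileCheck vfncvt-xu-f.ll

  sed 's/iXLen/i64/g' vfncvt-xu-f.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \
          -target-abi=lp64d \
    | FileCheck vfncvt-xu-f.ll

Because the emitted assembly is the same for either XLen width, a single
set of autogenerated CHECK lines serves both runs.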
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
index 4981f8b16d741..7d802cabd3f77 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x h
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
   <vscale x 1 x i8>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x h
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
   <vscale x 2 x i8>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x h
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
   <vscale x 4 x i8>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x h
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
   <vscale x 8 x i8>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
   <vscale x 16 x i8>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
   <vscale x 32 x i8>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
   <vscale x 1 x i16>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
   <vscale x 2 x i16>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
   <vscale x 4 x i16>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
   <vscale x 8 x i16>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
   <vscale x 16 x i16>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
   <vscale x 1 x i32>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
   <vscale x 2 x i32>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
   <vscale x 4 x i32>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
   <vscale x 8 x i32>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
deleted file mode 100644
index f8419e81f7d06..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}

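(For reference, the rename below converts this pair of per-XLen files into a single sed-driven test; a rough sketch of what lit executes for each RUN: pair, assuming llc and FileCheck from a build tree are on PATH and $TEST names the merged .ll file:

  sed 's/iXLen/i32/g' $TEST | llc -mtriple=riscv32 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=ilp32d | FileCheck $TEST
  sed 's/iXLen/i64/g' $TEST | llc -mtriple=riscv64 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=lp64d | FileCheck $TEST

Both substitutions are verified against the same CHECK lines, so a test can only be merged this way where riscv32 and riscv64 produce identical vector codegen.)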
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
index fa1767202c126..d46c29f3be78b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

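The merged vfnmacc.ll (and the other merged files) drives both XLen widths from the single iXLen-typed body by piping the test through sed before llc, so one file replaces the old -rv32/-rv64 pair. A minimal sketch of that RUN-line pattern, reusing the flags visible in the deleted rv64 file below; the rv32 -target-abi=ilp32d spelling is an assumption by analogy, not quoted from the new file:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s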
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
deleted file mode 100644
index ab407427952ac..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
index e0d33062322b8..44810af5ab31b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

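For context on the new RUN scheme above: the merged test is compiled once per XLen by textually substituting the iXLen placeholder before llc ever sees the file, so a single copy of the IR covers both riscv32 and riscv64. A minimal shell sketch of the pipeline those RUN lines encode (run from the llvm-project root; the explicit path is just the renamed file from this diff):

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll | \
    llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll | \
    llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d

Both invocations feed the same default CHECK prefix in FileCheck, which works because the generated vector assembly is identical for the two targets apart from the XLen type of the trailing VL operand.
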
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
deleted file mode 100644
index 58e489618bc4e..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}

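For reference, the merged file below shows the lit pattern in full: the test
body is written once against the placeholder type iXLen, and each new RUN
line pipes the file through sed to instantiate iXLen as i32 or i64 before
invoking llc. A minimal sketch of such a merged test, assembled from the
vfnmsac declarations already in this file (the function name @example and
the abbreviated CHECK lines are illustrative, not taken from the commit):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; The VL operand is declared as iXLen; sed rewrites it to the target's
; native XLen type before llc ever parses the file.
declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @example(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: example:
; CHECK:       vfnmsac.vv v8, v9, v10
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)
  ret <vscale x 1 x half> %a
}

Note that both RUN lines feed the same default CHECK prefix, so the merge is
only possible where the generated assembly is identical on riscv32 and
riscv64, as it is for these vfnmsac tests.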
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
index 834938c7d6e74..ff1bcfa86d3a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
deleted file mode 100644
index 67dbb5a92dfa3..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i32 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i32 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i32 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}

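A note on the renamed test that follows: its two RUN lines pipe the same
source through sed before llc, so one file is exercised at both XLen widths.
As a minimal sketch of what lit effectively executes (the explicit path
stands in for lit's %s substitution, and llc/FileCheck are assumed to be
built and on PATH), the two RUN lines expand to:

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll

Both pipelines are checked against the same CHECK lines in the unmodified
file, so the merged test can only carry assertions that come out identical
for rv32 and rv64.
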
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
index 07b23dbfb066d..e6ca32f34752c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 2 x half> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x half> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -686,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 8 x half> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -732,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x half> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 16 x half> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -778,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x half> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x float> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 2 x float> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -870,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 4 x float> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -916,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -925,9 +927,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 8 x float> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -962,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
deleted file mode 100644
index ccdd6ad371896..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
+++ /dev/null
@@ -1,677 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
index 1d502c84b1981..58dc39b995055 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -266,7 +268,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -274,9 +276,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -286,7 +288,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -296,10 +298,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -311,7 +313,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -319,9 +321,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -331,7 +333,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -341,10 +343,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -356,7 +358,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -364,9 +366,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -376,7 +378,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -386,10 +388,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -401,7 +403,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -409,9 +411,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -421,7 +423,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -431,10 +433,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -446,7 +448,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -454,9 +456,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -466,7 +468,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -476,10 +478,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -491,7 +493,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -499,9 +501,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -511,7 +513,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -521,10 +523,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -544,9 +546,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -556,7 +558,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -566,10 +568,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -581,7 +583,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -589,9 +591,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -601,7 +603,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -611,10 +613,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -634,9 +636,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -646,7 +648,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -656,10 +658,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -671,7 +673,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

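(For reference: the iXLen placeholder in the merged test above is not valid LLVM IR on its own; each RUN line rewrites it textually before llc ever parses the file. A minimal sketch of what the two RUN lines amount to, assuming the merged test is saved as vfrdiv.ll:

  # Expand the placeholder for RV32, compile, and check against the
  # CHECK lines embedded in the unexpanded source file.
  sed 's/iXLen/i32/g' vfrdiv.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfrdiv.ll

  # Same source, expanded for RV64.
  sed 's/iXLen/i64/g' vfrdiv.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfrdiv.ll

Both targets are checked against the same CHECK lines, which is what makes the merge possible: the generated vector code is identical on riscv32 and riscv64; only the XLen-sized scalar operands, the VL and the mask policy immediate, differ in type.)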
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
deleted file mode 100644
index 4e0fe7b9fc62f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

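(The same substitution scheme applies to the merged vfrec7 test that follows. As a concrete example of the expansion, the first declaration in the merged file,

  declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
    <vscale x 1 x half>,
    iXLen);

becomes, after sed 's/iXLen/i64/g' on the riscv64 run,

  declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
    <vscale x 1 x half>,
    i64);

which is exactly the declaration the deleted vfrec7-rv64.ll carried above.)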
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
index 30897b95deea2..3be9f912d7f2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x hal
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x hal
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x hal
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x hal
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x fl
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x fl
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x fl
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x fl
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x d
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x d
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x d
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x d
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
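
The merged files above drive both XLen widths through one test body: lit pipes the
file through sed to rewrite the iXLen placeholder before llc ever sees it. A minimal
sketch of what the paired RUN lines expand to outside of lit (assuming an llc and
FileCheck from the same build are on PATH, and using the merged vfredmax.ll below as
the example input):

  # XLen=32: rewrite the placeholder, compile for riscv32, verify the shared CHECKs
  sed 's/iXLen/i32/g' vfredmax.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=ilp32d | FileCheck vfredmax.ll
  # XLen=64: same file and same CHECK lines, targeting riscv64 this time
  sed 's/iXLen/i64/g' vfredmax.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=lp64d | FileCheck vfredmax.ll

Because both pipelines are verified against the same CHECK lines, the separate rv32
and rv64 copies of each test can collapse into a single iXLen file, as in the
deletion and rename that follow.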

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
deleted file mode 100644
index bb173e91ecf31..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
index 25ed3f1ab3676..0a2d72bf382aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 2 x half>,
   <vscale x 4 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 8 x half>,
   <vscale x 4 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 16 x half>,
   <vscale x 4 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 32 x half>,
   <vscale x 4 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 1 x float>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 4 x float>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 8 x float>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 16 x float>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
   <vscale x 2 x double>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
   <vscale x 4 x double>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
   <vscale x 8 x double>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
deleted file mode 100644
index d04ef7a6707de..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
index 9561be7b6fc09..4d0301d3485ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 2 x half>,
   <vscale x 4 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 8 x half>,
   <vscale x 4 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 16 x half>,
   <vscale x 4 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 32 x half>,
   <vscale x 4 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 1 x float>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 4 x float>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 8 x float>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 16 x float>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
   <vscale x 2 x double>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
   <vscale x 4 x double>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
   <vscale x 8 x double>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
deleted file mode 100644
index 8c42e43d9094d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
index 1f1e68e0dbc9b..b814315d90cd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 2 x half>,
   <vscale x 4 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 8 x half>,
   <vscale x 4 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 16 x half>,
   <vscale x 4 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 32 x half>,
   <vscale x 4 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 1 x float>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 4 x float>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 8 x float>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1
   <vscale x 16 x float>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
   <vscale x 2 x double>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
   <vscale x 4 x double>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
   <vscale x 8 x double>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
deleted file mode 100644
index 9264397afa0e7..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
index 7b3691c74887c..e6ff649ab398c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 2 x half>,
   <vscale x 4 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 8 x half>,
   <vscale x 4 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 16 x half>,
   <vscale x 4 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 32 x half>,
   <vscale x 4 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x half> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 1 x float>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 4 x float>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 8 x float>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1
   <vscale x 16 x float>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
   <vscale x 2 x double>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
   <vscale x 4 x double>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
   <vscale x 8 x double>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

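To make the merged file's RUN-line scheme concrete: vfredusum.ll above is now written once against the placeholder type iXLen, and each RUN line specializes the file with sed before llc ever parses the IR. A minimal self-contained sketch of the pattern; the RUN lines and the declaration are copied from the renamed file above, and only the explanatory comments are added:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; llc never sees "iXLen": sed has already rewritten it to i32 (first RUN
; line) or i64 (second RUN line), so one source file covers both XLens.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);
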
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
deleted file mode 100644
index cf9d7a4c0af5a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
-    <vscale x 32 x half> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
-    <vscale x 16 x float> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
-    <vscale x 1 x double> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
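
For readers skimming the deleted bodies above: the masked vfrsqrt7 declarations
all share one operand layout. A sketch of that layout, inferred from the call
sites in this file (the trailing comments are annotation added here, not text
from the commit; "policy" is my label for the final immediate, which these
tests always pass as 1):

  declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
    <vscale x 2 x half>,   ; merge value, arrives in v8 in the CHECK lines
    <vscale x 2 x half>,   ; source vector
    <vscale x 2 x i1>,     ; mask, expected in v0
    i64,                   ; vl, loaded into a0 for vsetvli
    i64);                  ; policy immediate (1 in every call here)

The same five-operand shape reappears with iXLen in place of i64 once the
rename below lands.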

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll
index d0198a85b0c52..a521b6c2f2b57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x h
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x h
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x h
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -137,7 +139,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x h
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -160,16 +162,16 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -178,7 +180,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -187,10 +189,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,16 +203,16 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -219,7 +221,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
     <vscale x 32 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -228,10 +230,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -242,16 +244,16 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -260,7 +262,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -283,16 +285,16 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -301,7 +303,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -324,16 +326,16 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -342,7 +344,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -351,10 +353,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -365,16 +367,16 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -383,7 +385,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -392,10 +394,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -406,16 +408,16 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -424,7 +426,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 1
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
     <vscale x 16 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -447,16 +449,16 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -465,7 +467,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
     <vscale x 1 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -474,10 +476,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -488,16 +490,16 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -506,7 +508,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
     <vscale x 2 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -515,10 +517,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -529,16 +531,16 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -547,7 +549,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
     <vscale x 4 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -570,16 +572,16 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
     <vscale x 8 x double> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -597,10 +599,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
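
The rename above is where the merge actually happens: the rv32 file keeps its
CHECK lines and gains a second RUN line, with sed materializing the XLen type
before llc parses the file (iXLen is not a valid IR type, so the substitution
has to come first). A minimal sketch of the two pipelines the new RUN lines
expand to, assuming lit has substituted %s with a local copy named
vfrsqrt7.ll; the spelled-out commands are illustrative only:

  # rv32 run: the placeholder becomes i32, output checked against the
  # shared CHECK lines
  sed 's/iXLen/i32/g' vfrsqrt7.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfrsqrt7.ll

  # rv64 run: same source, placeholder becomes i64 instead
  sed 's/iXLen/i64/g' vfrsqrt7.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfrsqrt7.ll

A single CHECK prefix suffices because the vfrsqrt7 codegen is identical for
both XLens; only the scalar type of the vl and policy operands differs.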

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
deleted file mode 100644
index 0477554c91419..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
+++ /dev/null
@@ -1,678 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
index eab5b2a414e18..3fb281562088c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -266,7 +268,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -274,9 +276,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -286,7 +288,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -296,10 +298,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -311,7 +313,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -319,9 +321,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -331,7 +333,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -341,10 +343,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -356,7 +358,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -364,9 +366,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -376,7 +378,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -386,10 +388,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -401,7 +403,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -409,9 +411,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -421,7 +423,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -431,10 +433,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -446,7 +448,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -454,9 +456,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -466,7 +468,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -476,10 +478,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -491,7 +493,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -499,9 +501,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -511,7 +513,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -521,10 +523,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -544,9 +546,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -556,7 +558,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -566,10 +568,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -581,7 +583,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -589,9 +591,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -601,7 +603,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -611,10 +613,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -634,9 +636,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -646,7 +648,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -656,10 +658,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -671,7 +673,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
deleted file mode 100644
index d71fb8fb25352..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

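The rename that follows shows the new single-file pattern: the test body is parameterized on iXLen, and each RUN line pipes the file through sed to substitute a concrete XLen type before handing it to llc. As a minimal sketch (flags and path taken from the RUN lines below; FileCheck reads the generated assembly from stdin and matches it against the CHECK lines in the unsubstituted file, which is valid because the CHECK lines never mention iXLen), the riscv64 RUN line is roughly equivalent to:

  # substitute the XLen placeholder, compile, then verify the assembly
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll

The riscv32 RUN line is the same pipeline with 's/iXLen/i32/g', -mtriple=riscv32, and -target-abi=ilp32d, so one copy of the IR exercises both XLen configurations.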
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
index c0e999a2433d0..65a5592775cb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
deleted file mode 100644
index f751fd7406389..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
index 0287f9ea2cbf3..f16c8a6db1fb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

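For context (illustrative only, not part of the patch): the two new RUN lines above amount to specializing the test for each XLen before compiling it. A minimal sketch of the equivalent shell pipeline, assuming llc and FileCheck are on PATH and the commands run from an llvm-project checkout:

  # Specialize iXLen to i32, compile for rv32, and check against the shared CHECK lines.
  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
  # Same test source, specialized to i64 and compiled for rv64.
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll

Both targets emit identical vector assembly for these intrinsics, so a single set of autogenerated CHECK lines covers both runs.
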
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
deleted file mode 100644
index 4ae69f0d4f613..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

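(Context for the rename below: rather than keeping one copy of each test
for rv32 and one for rv64, the merged file writes the VL/policy operand
type as the placeholder "iXLen" — not a real IR type — and the RUN lines
have lit pipe the file through sed so llc only ever sees i32 or i64. A
rough sketch of what the two new RUN lines execute, assuming lit has
substituted %s with the path to vfsgnjx.ll:

  sed 's/iXLen/i32/g' vfsgnjx.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=ilp32d | FileCheck vfsgnjx.ll
  sed 's/iXLen/i64/g' vfsgnjx.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=lp64d | FileCheck vfsgnjx.ll

Since the CHECK lines never mention i32 or i64, a single set of FileCheck
assertions covers both targets.)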
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
index fba4eceb23fb6..edfd578ce8aa0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
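
Every hunk in this file applies the same mechanical substitution: the trailing vector-length operand, plus the policy operand of the masked forms, changes from a hard-coded i32 to the iXLen placeholder, and paired sed-driven RUN lines specialize the placeholder per target before llc runs. A minimal sketch of the merged form follows, trimmed to a single intrinsic and with the autogenerated CHECK lines elided; utils/update_llc_test_checks.py regenerates those:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

A single CHECK prefix covers both RUN lines because the emitted vfsgnjx.vf sequence is identical on rv32 and rv64; only the IR type of the VL operand differs, and sed resolves that before llc ever parses the file.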

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
deleted file mode 100644
index a0ba31ea26686..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
+++ /dev/null
@@ -1,677 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
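
The rv64 variant above is deleted outright rather than merged line-by-line; its coverage is absorbed by the rv32 file, which the next diff renames and re-parameterizes over iXLen (hence the 85% similarity index on the rename). Expanded by hand, the rv64 leg of the renamed test runs roughly this pipeline, with the file path as it lands in this commit:

  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll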

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
index 5baadc48d857e..6cbba483a8d9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -266,7 +268,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -274,9 +276,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -286,7 +288,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -296,10 +298,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -311,7 +313,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -319,9 +321,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -331,7 +333,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -341,10 +343,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -356,7 +358,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -364,9 +366,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -376,7 +378,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -386,10 +388,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -401,7 +403,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -409,9 +411,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -421,7 +423,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -431,10 +433,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -446,7 +448,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -454,9 +456,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -466,7 +468,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -476,10 +478,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -491,7 +493,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -499,9 +501,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -511,7 +513,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -521,10 +523,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -544,9 +546,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -556,7 +558,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -566,10 +568,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -581,7 +583,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -589,9 +591,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -601,7 +603,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -611,10 +613,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -634,9 +636,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -646,7 +648,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -656,10 +658,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -671,7 +673,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
deleted file mode 100644
index 4b7d1fe55e244..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
index 271bf70522bfa..695cf7aab3f6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -63,7 +65,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -73,10 +75,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -96,9 +98,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -109,7 +111,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -119,10 +121,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -142,9 +144,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -155,7 +157,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -165,10 +167,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -188,9 +190,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,7 +203,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -211,10 +213,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -234,9 +236,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -247,7 +249,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -257,10 +259,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -280,9 +282,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -293,7 +295,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -303,10 +305,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -326,9 +328,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -339,7 +341,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -349,10 +351,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -372,9 +374,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -385,7 +387,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -395,10 +397,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -418,9 +420,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -431,7 +433,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -441,10 +443,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -477,7 +479,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -487,10 +489,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -510,9 +512,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -523,7 +525,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -533,10 +535,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -556,9 +558,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -569,7 +571,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -579,10 +581,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -602,9 +604,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -615,7 +617,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -625,10 +627,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -648,9 +650,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -661,7 +663,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -671,10 +673,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
deleted file mode 100644
index 3b86fd763f3c9..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
+++ /dev/null
@@ -1,548 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
-    <vscale x 1 x half> %0,
-    i32 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
-    <vscale x 2 x half> %0,
-    i32 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
-    <vscale x 4 x half> %0,
-    i32 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
-    <vscale x 8 x half> %0,
-    i32 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
-    <vscale x 16 x half> %0,
-    i32 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
-  <vscale x 32 x half>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
-    <vscale x 32 x half> %0,
-    i32 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
-    <vscale x 1 x float> %0,
-    i32 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
-    <vscale x 2 x float> %0,
-    i32 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
-    <vscale x 4 x float> %0,
-    i32 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
-    <vscale x 8 x float> %0,
-    i32 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
-    <vscale x 16 x float> %0,
-    i32 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
-    <vscale x 1 x double> %0,
-    i32 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
-    <vscale x 2 x double> %0,
-    i32 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
-    <vscale x 4 x double> %0,
-    i32 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-    <vscale x 8 x double> %0,
-    i32 %1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
index c810a516f3b35..d944375d645cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -25,45 +25,39 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  <vscale x 1 x half> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 2 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
     <vscale x 2 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -72,45 +66,39 @@ declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x half> %0,
-  <vscale x 2 x half> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(
+define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 4 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
     <vscale x 4 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -119,45 +107,39 @@ declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(
+define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 4 x half> %0,
-  <vscale x 4 x half> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(
+define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 8 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
     <vscale x 8 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -166,45 +148,39 @@ declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(
+define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 8 x half> %0,
-  <vscale x 8 x half> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(
+define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 16 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
     <vscale x 16 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -213,45 +189,39 @@ declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(
+define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 16 x half> %0,
-  <vscale x 16 x half> %1,
-  <vscale x 16 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
   <vscale x 32 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(
+define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 32 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
     <vscale x 32 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -260,45 +230,39 @@ declare <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(
+define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 32 x half> %0,
-  <vscale x 32 x half> %1,
-  <vscale x 32 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(
+define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
     <vscale x 1 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -307,45 +271,39 @@ declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(
+define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 1 x float> %0,
-  <vscale x 1 x float> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(
+define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 2 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
     <vscale x 2 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -354,45 +312,39 @@ declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(
+define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x float> %0,
-  <vscale x 2 x float> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 4 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
     <vscale x 4 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -401,45 +353,39 @@ declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 4 x float> %0,
-  <vscale x 4 x float> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 8 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
     <vscale x 8 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -448,45 +394,39 @@ declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 8 x float> %0,
-  <vscale x 8 x float> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
   <vscale x 16 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
+define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 16 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
     <vscale x 16 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -495,45 +435,39 @@ declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
+define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 16 x float> %0,
-  <vscale x 16 x float> %1,
-  <vscale x 16 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
   <vscale x 1 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(
+define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
     <vscale x 1 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -542,45 +476,39 @@ declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(
+define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 1 x double> %0,
-  <vscale x 1 x double> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
   <vscale x 2 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(
+define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 2 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
     <vscale x 2 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -589,45 +517,39 @@ declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(
+define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x double> %0,
-  <vscale x 2 x double> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
   <vscale x 4 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(
+define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 4 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
     <vscale x 4 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,45 +558,39 @@ declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(
+define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 4 x double> %0,
-  <vscale x 4 x double> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
   <vscale x 8 x double>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(
+define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
     <vscale x 8 x double> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -683,25 +599,21 @@ declare <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(
+define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  <vscale x 8 x double> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

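(For readers following along: a minimal sketch of the pipeline the new RUN
lines in vfsqrt.ll drive, assuming the merged test is saved locally as
vfsqrt.ll; under lit, %s expands to the test path automatically. sed rewrites
the iXLen placeholder to the target's XLen type, llc compiles the result from
stdin, and FileCheck verifies llc's output against the CHECK lines in the
unmodified source file:

  # RV32: substitute iXLen -> i32, compile, verify against the CHECK lines
  sed 's/iXLen/i32/g' vfsqrt.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfsqrt.ll

  # RV64: same flow with i64 and the lp64d ABI
  sed 's/iXLen/i64/g' vfsqrt.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfsqrt.ll

Both substitutions yield identical vector assembly, since the only XLen-sized
operand is the VL, which arrives in a0 under either ABI; that is why a single
shared CHECK prefix suffices for riscv32 and riscv64.)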
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
deleted file mode 100644
index 7445cfb806d43..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ /dev/null
@@ -1,1356 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

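The rename below converts the old rv32 file in place: the RUN lines gain a second, riscv64 llc invocation, and every i32 XLen operand becomes the iXLen placeholder. Either configuration can also be reproduced outside of lit; a minimal sketch, assuming llc and FileCheck are on PATH and the working directory is the llvm-project root:

$ sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfsub.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfsub.ll

Under lit, %s expands to the path of the test file itself, so each RUN line pipes a width-specific copy of the test through llc and checks the resulting assembly against the shared CHECK lines.
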
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsub.ll
index 86371c1685fc0..645fb340ffa30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -784,7 +786,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -794,10 +796,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -817,9 +819,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -829,7 +831,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -839,10 +841,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -854,7 +856,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -862,9 +864,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -874,7 +876,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -884,10 +886,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -899,7 +901,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -907,9 +909,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -919,7 +921,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -929,10 +931,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -944,7 +946,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -952,9 +954,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -964,7 +966,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -974,10 +976,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -989,7 +991,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -997,9 +999,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1009,7 +1011,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1019,10 +1021,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1042,9 +1044,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1064,10 +1066,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1079,7 +1081,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1087,9 +1089,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1099,7 +1101,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1109,10 +1111,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1144,7 +1146,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1154,10 +1156,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -1169,7 +1171,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1177,9 +1179,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1189,7 +1191,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1199,10 +1201,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1244,10 +1246,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
deleted file mode 100644
index a04b9a54b9306..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
index a3bdcc4573287..541f2b8564f28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -63,7 +65,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -73,10 +75,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -96,9 +98,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -109,7 +111,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -119,10 +121,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -142,9 +144,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -155,7 +157,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -165,10 +167,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -188,9 +190,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,7 +203,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -211,10 +213,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -234,9 +236,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -247,7 +249,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -257,10 +259,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -280,9 +282,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -293,7 +295,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -303,10 +305,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -326,9 +328,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -339,7 +341,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -349,10 +351,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -372,9 +374,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -385,7 +387,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -395,10 +397,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -418,9 +420,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -431,7 +433,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -441,10 +443,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -477,7 +479,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -487,10 +489,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -510,9 +512,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -523,7 +525,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -533,10 +535,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -556,9 +558,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -569,7 +571,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -579,10 +581,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -602,9 +604,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -615,7 +617,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -625,10 +627,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -648,9 +650,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -661,7 +663,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -671,10 +673,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -694,9 +696,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -707,7 +709,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -717,10 +719,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -732,7 +734,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -740,9 +742,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -753,7 +755,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -763,10 +765,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -778,7 +780,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -786,9 +788,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -799,7 +801,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -809,10 +811,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -824,7 +826,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
deleted file mode 100644
index 3586ec64e9dfe..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ /dev/null
@@ -1,1248 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %0,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %0,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %0,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %0,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %0,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %0,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %0,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %0,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}

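The rename below merges the RV32 variant into a single XLen-parameterized
file: each RUN line pipes the source through sed to rewrite the iXLen
placeholder to the target's native integer width before invoking llc (lit
substitutes %s with the test file's path). A minimal sketch of reproducing
both runs by hand, assuming the post-rename file path, would be:

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d
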
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
index 3d046d2ba8057..28cdfbf621b3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -222,7 +224,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -230,9 +232,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -242,7 +244,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -252,10 +254,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -267,7 +269,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -403,7 +405,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -411,9 +413,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -423,7 +425,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -448,7 +450,7 @@ entry:
     <vscale x 1 x float> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -456,9 +458,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -468,7 +470,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -478,10 +480,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -493,7 +495,7 @@ entry:
     <vscale x 2 x float> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 4 x float> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 8 x float> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 16 x float> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -673,7 +675,7 @@ entry:
     <vscale x 1 x double> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -681,9 +683,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -693,7 +695,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -703,10 +705,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -718,7 +720,7 @@ entry:
     <vscale x 2 x double> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -726,9 +728,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -738,7 +740,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -748,10 +750,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -763,7 +765,7 @@ entry:
     <vscale x 4 x double> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -771,9 +773,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -783,7 +785,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -793,10 +795,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -808,12 +810,12 @@ entry:
     <vscale x 8 x double> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -825,12 +827,12 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -842,12 +844,12 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -859,12 +861,12 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -876,12 +878,12 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -893,12 +895,12 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -910,12 +912,12 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -927,12 +929,12 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -944,12 +946,12 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -961,12 +963,12 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -978,12 +980,12 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -995,12 +997,12 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1012,12 +1014,12 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1029,12 +1031,12 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1046,12 +1048,12 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1063,12 +1065,12 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1080,12 +1082,12 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1097,12 +1099,12 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1114,12 +1116,12 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1130,12 +1132,12 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1146,12 +1148,12 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1162,12 +1164,12 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1178,12 +1180,12 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1194,12 +1196,12 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1210,12 +1212,12 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1226,12 +1228,12 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1242,7 +1244,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
deleted file mode 100644
index e050090b7761c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index 460488388b5ea..386fc9a4822ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

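For reference, the new RUN lines in vfwcvt-f-f.ll above can be reproduced by hand; a minimal sketch, assuming an LLVM build with llc and FileCheck on PATH and run from the source tree so the test path resolves (the path and flags are taken from the RUN lines, nothing else is implied):

  # Substitute iXLen for the 32-bit target, compile, and check the shared assertions.
  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll

  # Same test body, 64-bit XLen.
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll

Since both pipelines feed the same CHECK lines, this only works for tests whose codegen is identical across XLen, which is why each merged file keeps a single CHECK prefix.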
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
deleted file mode 100644
index e294cbe085f73..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
-    <vscale x 1 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
-    <vscale x 2 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
-    <vscale x 4 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
-    <vscale x 8 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
-    <vscale x 16 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
-    <vscale x 32 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
-    <vscale x 16 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

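The rename diff below is where the interesting part of the change lives.
Assembled from its hunks (and trimmed to the first test), the merged file
reduces to the shape sketched here: each RUN line pipes the file through sed
to rewrite the placeholder iXLen into a concrete integer type before llc
parses it, so one copy of the IR serves both targets. This is only a sketch
stitched together from the hunks that follow, not a separate file in the
commit.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; iXLen is not a real IR type; sed replaces it before llc ever sees the
; file. FileCheck still reads its CHECK lines from the original %s, where
; they are untouched by the substitution.
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
    <vscale x 1 x i8> %0,
    iXLen %1)
  ret <vscale x 1 x half> %a
}

A single CHECK prefix is enough because the generated machine code does not
depend on XLen here: the VL operand arrives in a0 either way, so the rv32
and rv64 runs produce identical assembly.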
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
index 467b64c20fedd..ad4a3a4a5eb6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 1
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 3
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

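The deleted vfwcvt-f-xu-rv64.ll file that follows repeats the same structure
for the unsigned conversion. One detail of these tests worth annotating: the
masked declarations carry two trailing iXLen operands. The first is the
vector length, which lands in a0 and feeds vsetvli; the constant 1 passed as
the second reads as the tail-policy immediate (1 = tail agnostic), which
would match the "ta" in the generated vsetvli, though that interpretation is
my own reading rather than something stated in the commit. Annotated sketch
of the declaration shape used throughout the diff above:

; Masked form, operand by operand (annotations are interpretive):
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,  ; maskedoff/passthru: result where the mask is 0
  <vscale x 1 x i8>,    ; source vector to convert
  <vscale x 1 x i1>,    ; mask, materialized in v0 (hence v0.t in the asm)
  iXLen,                ; vl, passed in a0 to vsetvli
  iXLen);               ; policy immediate; the tests pass 1 (assumed
                        ; tail-agnostic, matching "ta" in vsetvli)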
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
deleted file mode 100644
index 107813b7879bd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
+++ /dev/null
@@ -1,632 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
-    <vscale x 1 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
-    <vscale x 2 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
-    <vscale x 4 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
-    <vscale x 8 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
-    <vscale x 16 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
-    <vscale x 32 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
-    <vscale x 16 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
index fc0af066c39ec..9eef34d4de1e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
 declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
 declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
 declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,16 +376,16 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -393,7 +395,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -402,10 +404,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -416,16 +418,16 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -435,7 +437,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -444,10 +446,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -458,16 +460,16 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -477,7 +479,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -486,10 +488,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -500,16 +502,16 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -519,7 +521,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -528,10 +530,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -542,16 +544,16 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -561,7 +563,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -570,10 +572,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -584,16 +586,16 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -603,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -612,10 +614,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -626,7 +628,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
deleted file mode 100644
index 4d551f62ec52f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
index d6cf1b3563105..0f7a46aadfd15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
   <vscale x 4 x i32>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
   <vscale x 8 x i32>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
   <vscale x 16 x i32>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
   <vscale x 1 x i64>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
   <vscale x 2 x i64>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
   <vscale x 4 x i64>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
   <vscale x 8 x i64>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
deleted file mode 100644
index c419e08471ca1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
-    <vscale x 1 x half> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
-    <vscale x 2 x half> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
-    <vscale x 4 x half> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
-    <vscale x 8 x half> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
-    <vscale x 16 x half> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
-    <vscale x 1 x float> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll
index 6b881aba8f652..f3d786a37fbe4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
   <vscale x 4 x i32>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
   <vscale x 8 x i32>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscal
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16
   <vscale x 16 x i32>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
   <vscale x 1 x i64>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
   <vscale x 2 x i64>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
   <vscale x 4 x i64>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
   <vscale x 8 x i64>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
deleted file mode 100644
index fd01c64df0d36..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll
index b8d88e1c64e55..6c3c2d7702eb1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
   <vscale x 4 x i32>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
   <vscale x 8 x i32>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 1
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
   <vscale x 16 x i32>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
   <vscale x 1 x i64>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
   <vscale x 2 x i64>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
   <vscale x 4 x i64>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
   <vscale x 8 x i64>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
deleted file mode 100644
index dc461d60b0be9..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
-    <vscale x 2 x half> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
-    <vscale x 4 x half> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
-    <vscale x 8 x half> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
-    <vscale x 16 x half> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
-    <vscale x 1 x float> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
index c2b0a222709e4..10bd22304ed83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
   <vscale x 4 x i32>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -150,10 +152,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
   <vscale x 8 x i32>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -164,16 +166,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -192,10 +194,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
   <vscale x 16 x i32>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -206,16 +208,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -234,10 +236,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
   <vscale x 1 x i64>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -248,16 +250,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -276,10 +278,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
   <vscale x 2 x i64>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -290,16 +292,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -309,7 +311,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
   <vscale x 4 x i64>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,16 +334,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -351,7 +353,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
   <vscale x 8 x i64>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -374,7 +376,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

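The RUN lines in the renamed test above show the whole mechanism: the merged
test source uses the placeholder type iXLen, and sed specializes it to i32 or
i64 before llc ever parses the file, so one copy of the IR serves both
targets. A minimal sketch of the equivalent pipeline run by hand outside of
lit, assuming llc and FileCheck are on PATH and the source is saved as
vfwcvt-xu-f.ll (file name here is illustrative):

  # Specialize the placeholder for RV32, compile, and verify the assembly.
  sed 's/iXLen/i32/g' vfwcvt-xu-f.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfwcvt-xu-f.ll

  # The same source, specialized for RV64.
  sed 's/iXLen/i64/g' vfwcvt-xu-f.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfwcvt-xu-f.ll

Both invocations reuse the one set of CHECK lines (FileCheck reads its
patterns from the named file and the llc output from stdin), so merging an
rv32/rv64 test pair this way is only possible when the generated code is
identical for both XLens, as it is for these floating-point tests.
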
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
deleted file mode 100644
index eb21c54c18e90..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
index 0de121cb3f002..f5db61b5e8c7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -456,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -502,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -686,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -732,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -778,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
deleted file mode 100644
index b2e1e235e9695..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
index 82c4fad996e71..884ee36575b4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -456,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -502,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -686,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -732,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -778,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }

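(Note: the merged RUN lines at the top of the renamed vfwmsac.ll above carry the whole mechanism of this change: lit pipes the test through sed so every iXLen placeholder becomes the target's native integer width before llc sees the file, and one set of autogenerated CHECK lines then serves both targets. Roughly what lit executes for the two RUN lines -- a sketch only, with %s expanded to the test file's own path:

  sed 's/iXLen/i32/g' vfwmsac.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=ilp32d | FileCheck vfwmsac.ll
  sed 's/iXLen/i64/g' vfwmsac.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
    -verify-machineinstrs -target-abi=lp64d | FileCheck vfwmsac.ll

This only works because, per the commit message, the rv32 and rv64 tests differed solely in the XLen type of the vl operand, so the generated assembly and hence the default CHECK prefix are identical for both pipelines.)
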
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
deleted file mode 100644
index 670c79975a2e0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
index fc7d8dcb59e31..b1ec8464047e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -63,7 +65,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -73,10 +75,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -96,9 +98,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -109,7 +111,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -119,10 +121,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -142,9 +144,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -155,7 +157,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -165,10 +167,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -188,9 +190,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,7 +203,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -211,10 +213,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -234,9 +236,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -247,7 +249,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -257,10 +259,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -280,9 +282,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -293,7 +295,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -303,10 +305,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -326,9 +328,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -339,7 +341,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -349,10 +351,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -372,9 +374,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -385,7 +387,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -395,10 +397,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -418,9 +420,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -431,7 +433,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -441,10 +443,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -477,7 +479,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -487,10 +489,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -510,9 +512,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -523,7 +525,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -533,10 +535,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -556,9 +558,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -569,7 +571,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -579,10 +581,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -602,9 +604,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -615,7 +617,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -625,10 +627,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -648,9 +650,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -661,7 +663,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -671,10 +673,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -694,9 +696,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -707,7 +709,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -717,10 +719,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -732,7 +734,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -740,9 +742,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -753,7 +755,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -763,10 +765,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -778,7 +780,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -786,9 +788,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -799,7 +801,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -809,10 +811,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -824,7 +826,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

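The merged vfwmul.ll test above is XLen-agnostic: iXLen is not a real LLVM IR type, so each RUN line pipes the file through sed to instantiate it as i32 or i64 before llc ever parses the IR, and both RISC-V targets then share a single set of CHECK lines. Expanded by hand (a rough sketch — lit normally substitutes %s with the test's own path; this assumes you invoke it from the directory containing the test), the riscv32 RUN line is approximately:

  sed 's/iXLen/i32/g' vfwmul.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfwmul.ll

The riscv64 run is identical except for 's/iXLen/i64/g', -mtriple=riscv64, and -target-abi=lp64d. Since the generated code differs only in which scalar register width carries the VL operand, one FileCheck prefix suffices for both.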
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
deleted file mode 100644
index ff2b40cfac2cd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
index 02842609f5685..4ccd0f8c55835 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -456,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -502,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -686,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -732,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -778,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }

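(A note on the new RUN lines above: once lit substitutes %s with the test file, each RUN line is an ordinary shell pipeline that rewrites the iXLen placeholder with sed before llc parses the IR, and the CHECK lines in the unmodified file still serve as the FileCheck input. A minimal standalone sketch of the same pattern, assuming the merged test is saved as vfwnmacc.ll in the current directory:

  # RV32 configuration: rewrite the iXLen placeholder to i32 before llc sees it.
  sed 's/iXLen/i32/g' vfwnmacc.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck vfwnmacc.ll

  # RV64 configuration: the same source, with iXLen rewritten to i64 instead.
  sed 's/iXLen/i64/g' vfwnmacc.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck vfwnmacc.ll

Both invocations match against the same set of CHECK lines, which is what lets the former rv32 and rv64 copies of the test collapse into the single file above.)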
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
deleted file mode 100644
index 2fe370bb1d82f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
index fe9683ed15adb..26fcb06d89167 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -456,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -502,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -548,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x float> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -594,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -640,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x float> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -686,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -732,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -778,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -824,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x double> %a
 }
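
For reference, the two new RUN lines above make lit check this single file for both targets; run by hand, the riscv64 leg amounts to roughly the following sketch (lit normally substitutes %s and invokes FileCheck itself):

  # Rewrite iXLen to the 64-bit XLen type, compile, and verify the shared CHECK lines.
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll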

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
deleted file mode 100644
index 2282bd5fbc860..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ /dev/null
@@ -1,508 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
index 37240159bd908..7eb16dd4b8a80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32
   <vscale x 1 x half>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
   <vscale x 2 x float>,
   <vscale x 4 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32
   <vscale x 4 x half>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
   <vscale x 2 x float>,
   <vscale x 8 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32
   <vscale x 8 x half>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

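(A note on the new RUN-line idiom, for readers skimming the diffs: lit substitutes %s with the path of the test file, so each RUN line pipes a sed-rewritten copy of the test into llc and checks the result against the unmodified file's CHECK lines. As a rough sketch, the riscv32 configuration expands to the pipeline below; paths are relative to the LLVM source root, and llc and FileCheck are assumed to come from the build tree:

  # riscv32 leg of the merged test; the riscv64 leg is identical except
  # sed rewrites iXLen to i64 and llc gets -mtriple=riscv64 -target-abi=lp64d.
  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll

Both legs feed the same default CHECK prefix, which is what lets one file replace the old -rv32/-rv64 pair.)
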
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
deleted file mode 100644
index 52bde877eb7d1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
+++ /dev/null
@@ -1,508 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
index fe56d0c6bd0d0..897cd61fb437a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32
   <vscale x 1 x half>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
   <vscale x 2 x float>,
   <vscale x 4 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32
   <vscale x 4 x half>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
   <vscale x 2 x float>,
   <vscale x 8 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32
   <vscale x 8 x half>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f3
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f6
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f6
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f6
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f6
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
deleted file mode 100644
index d4b0780f03c57..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vf v10, v8, fa0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

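The renamed test below replaces the rv32-only RUN line with a pair of sed
pipelines that rewrite the iXLen placeholder into a concrete type before llc
ever parses the file. As a rough standalone sketch of that substitution (the
literal vfwsub.ll path and direct shell invocation are illustrative; under
lit, %s expands to the test file automatically):

  # Expand the placeholder to the target's XLEN type, compile, and
  # check the output against the CHECK lines in the same source file.
  sed 's/iXLen/i32/g' vfwsub.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \
          -target-abi=ilp32d \
    | FileCheck vfwsub.ll

  sed 's/iXLen/i64/g' vfwsub.ll \
    | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \
          -target-abi=lp64d \
    | FileCheck vfwsub.ll

Since both pipelines feed the same CHECK lines, the merged test can only
assert codegen that is identical for i32 and i64 XLen, which is exactly why
the two per-XLen copies were safe to fold into one file.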
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
index e81121f848ddb..916abcae0de0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -63,7 +65,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -73,10 +75,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -96,9 +98,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -109,7 +111,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -119,10 +121,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -142,9 +144,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -155,7 +157,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -165,10 +167,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -188,9 +190,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -201,7 +203,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -211,10 +213,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -234,9 +236,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -247,7 +249,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -257,10 +259,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -280,9 +282,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -293,7 +295,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -303,10 +305,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -326,9 +328,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -339,7 +341,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -349,10 +351,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -372,9 +374,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -385,7 +387,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -395,10 +397,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -418,9 +420,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -431,7 +433,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -441,10 +443,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -477,7 +479,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -487,10 +489,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -510,9 +512,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -523,7 +525,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -533,10 +535,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -556,9 +558,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -569,7 +571,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -579,10 +581,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -602,9 +604,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -615,7 +617,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -625,10 +627,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -648,9 +650,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -661,7 +663,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -671,10 +673,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -694,9 +696,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -707,7 +709,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -717,10 +719,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -732,7 +734,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -740,9 +742,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -753,7 +755,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -763,10 +765,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -778,7 +780,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -786,9 +788,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -799,7 +801,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -809,10 +811,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -824,7 +826,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
deleted file mode 100644
index da2290be93d27..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ /dev/null
@@ -1,1248 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
-  <vscale x 1 x float>,
-  half,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  half,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
-  <vscale x 2 x float>,
-  half,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %0,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %0,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %0,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %0,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %0,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %0,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %0,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %0,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
index ec0bd527dafe6..b5d008c3e1ed5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -222,7 +224,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -230,9 +232,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -242,7 +244,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -252,10 +254,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -267,7 +269,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -403,7 +405,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -411,9 +413,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -423,7 +425,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -448,7 +450,7 @@ entry:
     <vscale x 1 x float> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -456,9 +458,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -468,7 +470,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -478,10 +480,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -493,7 +495,7 @@ entry:
     <vscale x 2 x float> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 4 x float> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 8 x float> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 16 x float> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -673,7 +675,7 @@ entry:
     <vscale x 1 x double> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -681,9 +683,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -693,7 +695,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -703,10 +705,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -718,7 +720,7 @@ entry:
     <vscale x 2 x double> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -726,9 +728,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -738,7 +740,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -748,10 +750,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -763,7 +765,7 @@ entry:
     <vscale x 4 x double> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -771,9 +773,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -783,7 +785,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -793,10 +795,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -808,12 +810,12 @@ entry:
     <vscale x 8 x double> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -825,12 +827,12 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -842,12 +844,12 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -859,12 +861,12 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -876,12 +878,12 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -893,12 +895,12 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -910,12 +912,12 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -927,12 +929,12 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -944,12 +946,12 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -961,12 +963,12 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -978,12 +980,12 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -995,12 +997,12 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1012,12 +1014,12 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1029,12 +1031,12 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1046,12 +1048,12 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1063,12 +1065,12 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1080,12 +1082,12 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1097,12 +1099,12 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1114,12 +1116,12 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1130,12 +1132,12 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1146,12 +1148,12 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1162,12 +1164,12 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1178,12 +1180,12 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1194,12 +1196,12 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1210,12 +1212,12 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1226,12 +1228,12 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1242,7 +1244,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }

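(For context on the RUN-line idiom introduced above: lit substitutes %s with the test's own path, so each RUN line pre-processes the file with sed to choose an XLen, compiles the result with llc, and checks the output against the same file. Roughly, the first new RUN line expands to a pipeline like the following, with the file name taken from the rename above and the flags copied verbatim:

  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll \
    | llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs -target-abi=ilp32d \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll

A single unprefixed CHECK set suffices for both targets because, per the commit message, the rv32 and rv64 variants of these tests differed only in the scalar XLen type, not in the vector code being checked.)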