[llvm] 2f726c2 - [RISCV] Merge rv32/rv64 vector slideup and slidedown intrinsic tests that have the same content. NFC.

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 25 22:15:12 PDT 2023


Author: Jim Lin
Date: 2023-07-26T13:13:55+08:00
New Revision: 2f726c22cecc59b2730397843c9b0bbd839fa6ee

URL: https://github.com/llvm/llvm-project/commit/2f726c22cecc59b2730397843c9b0bbd839fa6ee
DIFF: https://github.com/llvm/llvm-project/commit/2f726c22cecc59b2730397843c9b0bbd839fa6ee.diff

LOG: [RISCV] Merge rv32/rv64 vector slideup and slidedown intrinsic tests that have the same content. NFC.

Added: 
    llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll

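For context, merged rv32/rv64 intrinsic tests in the RISC-V backend are usually written once against an iXLen placeholder type that a sed pipeline rewrites to i32 or i64 before llc runs, so a single file drives both triples and the CHECK lines are shared. The sketch below illustrates that convention using the first vslidedown case from the deleted rv32 file; it shows the common pattern, not necessarily the exact RUN lines or attribute strings of the new vslidedown.ll/vslideup.ll.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

; The scalar offset/vl/policy operands use the iXLen placeholder; sed expands it
; to the XLEN integer type of each target before llc parses the file.
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  ; Offset %2, vl %3, tail-agnostic policy (1), as in the deleted rv32 test.
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)
  ret <vscale x 1 x i8> %a
}

Because the generated assembly is identical on both targets, the same CHECK block serves both RUN lines, which is what makes folding the rv32 and rv64 copies into one file an NFC change.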

################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
deleted file mode 100644
index c5265d3a47389e..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ /dev/null
@@ -1,2492 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  <vscale x 32 x i1>,
-  i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i32,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i32,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i32,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i32,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32,
-  i32,
-  i32
-);
-
-define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i32,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x double> %a
-}
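
For reference, the rename below parameterizes the XLEN-sized scalar operands with an iXLen placeholder and lets each RUN line specialize it with sed, so a single file now drives both the riscv32 and riscv64 runs through the same CHECK lines. A minimal standalone sketch of the merged form, assembled from the first hunks of the rename below (illustrative only; the full file follows in the diff):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)
  ret <vscale x 1 x i8> %a
}

Running lit on such a file executes both RUN lines: each sed invocation rewrites iXLen to the target's native width before llc compiles the result, and FileCheck verifies the shared assembly expectations for both targets.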

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
index 5b7710b9967179..4e83272266c482 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
@@ -1,15 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,9 +20,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,11 +30,11 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -43,14 +44,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -60,14 +61,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -77,9 +78,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -87,12 +88,11 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -102,9 +102,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -112,11 +112,11 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -126,14 +126,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -143,14 +143,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -160,9 +160,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -170,12 +170,11 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -185,9 +184,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -195,11 +194,11 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -209,14 +208,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -226,14 +225,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -243,9 +242,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -253,12 +252,11 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -268,9 +266,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -278,11 +276,11 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -292,14 +290,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -309,14 +307,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -326,9 +324,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -336,12 +334,11 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -351,9 +348,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -361,11 +358,11 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -375,14 +372,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -392,14 +389,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -409,9 +406,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -419,12 +416,11 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -434,9 +430,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -444,11 +440,11 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -458,14 +454,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -475,14 +471,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -492,9 +488,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,12 +498,11 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -517,9 +512,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -527,11 +522,11 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -541,14 +536,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -558,14 +553,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -575,9 +570,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -585,12 +580,11 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -600,9 +594,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -610,11 +604,11 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -624,14 +618,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -641,14 +635,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -658,9 +652,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -668,12 +662,11 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -683,9 +676,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -693,11 +686,11 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -707,14 +700,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -724,14 +717,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -741,9 +734,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -751,12 +744,11 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -766,9 +758,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -776,11 +768,11 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -790,14 +782,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -807,14 +799,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -824,9 +816,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -834,12 +826,11 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -849,9 +840,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -859,11 +850,11 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -873,14 +864,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -890,14 +881,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -907,9 +898,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -917,12 +908,11 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -932,9 +922,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -942,11 +932,11 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -956,14 +946,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -973,14 +963,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -990,9 +980,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1000,12 +990,11 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1015,9 +1004,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1025,11 +1014,11 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1039,14 +1028,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -1056,14 +1045,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -1073,9 +1062,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1083,12 +1072,11 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1098,9 +1086,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1108,11 +1096,11 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1122,14 +1110,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -1139,14 +1127,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -1156,9 +1144,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1166,12 +1154,11 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1181,9 +1168,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1191,11 +1178,11 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1205,14 +1192,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -1222,14 +1209,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -1239,9 +1226,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1249,12 +1236,11 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1264,9 +1250,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1274,11 +1260,11 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -1288,14 +1274,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -1305,14 +1291,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1322,9 +1308,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1332,12 +1318,11 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1347,9 +1332,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1357,11 +1342,11 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
@@ -1371,14 +1356,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -1388,14 +1373,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1405,9 +1390,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1415,12 +1400,11 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1430,9 +1414,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1440,11 +1424,11 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
@@ -1454,14 +1438,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -1471,14 +1455,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1488,9 +1472,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1498,12 +1482,11 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1513,9 +1496,9 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1523,11 +1506,11 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1537,14 +1520,14 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -1554,14 +1537,14 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -1571,9 +1554,9 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1581,12 +1564,11 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1596,9 +1578,9 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1606,11 +1588,11 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1620,14 +1602,14 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -1637,14 +1619,14 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -1654,9 +1636,9 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1664,12 +1646,11 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1679,9 +1660,9 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1689,11 +1670,11 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1703,14 +1684,14 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -1720,14 +1701,14 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -1737,9 +1718,9 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1747,12 +1728,11 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1762,9 +1742,9 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1772,11 +1752,11 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1786,14 +1766,14 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -1803,14 +1783,14 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -1820,9 +1800,9 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1830,12 +1810,11 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1845,9 +1824,9 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1855,11 +1834,11 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1869,14 +1848,14 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -1886,14 +1865,14 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -1903,9 +1882,9 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1913,12 +1892,11 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1928,9 +1906,9 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1938,11 +1916,11 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1952,14 +1930,14 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -1969,14 +1947,14 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -1986,9 +1964,9 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1996,12 +1974,11 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2011,9 +1988,9 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -2021,11 +1998,11 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -2035,14 +2012,14 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2052,14 +2029,14 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -2069,9 +2046,9 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2079,12 +2056,11 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2094,9 +2070,9 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2104,11 +2080,11 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -2118,14 +2094,14 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2135,14 +2111,14 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -2152,9 +2128,9 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2162,12 +2138,11 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2177,9 +2152,9 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2187,11 +2162,11 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -2201,14 +2176,14 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2218,14 +2193,14 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -2235,9 +2210,9 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2245,12 +2220,11 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2260,9 +2234,9 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2270,11 +2244,11 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -2284,14 +2258,14 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2301,14 +2275,14 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -2318,9 +2292,9 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2328,12 +2302,11 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2343,9 +2316,9 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2353,11 +2326,11 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
@@ -2367,14 +2340,14 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2384,14 +2357,14 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -2401,9 +2374,9 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2411,12 +2384,11 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64,
-  i64,
-  i64
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2426,9 +2398,9 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2436,11 +2408,11 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
@@ -2450,14 +2422,14 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2467,14 +2439,14 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -2484,9 +2456,9 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }

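The i64 scalar operands above become the iXLen placeholder so that one merged file can drive both rv32 and rv64. A minimal sketch of the usual RUN-line convention for such XLEN-parameterized tests (an assumption here; the exact RUN lines of the merged vslidedown.ll/vslideup.ll are not shown in this diff): sed rewrites iXLen to the target's native integer width before the IR reaches llc, so the same FileCheck body covers both triples.

; Sketch only -- assumes the merged files follow the common iXLen/sed pattern.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
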
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
deleted file mode 100644
index b89eca77a1b440..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ /dev/null
@@ -1,2492 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64,
-  i64,
-  i64
-);
-
-define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2,
-    i64 %3,
-    i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 9,
-    i64 %2,
-    i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vslideup.ll
index 89b57222cf6f91..729ca01e55c7ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll
@@ -1,15 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,9 +20,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,11 +30,11 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -43,14 +44,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -60,14 +61,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -77,9 +78,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -87,12 +88,11 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -102,9 +102,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -112,11 +112,11 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -126,14 +126,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -143,14 +143,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -160,9 +160,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -170,12 +170,11 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -185,9 +184,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -195,11 +194,11 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -209,14 +208,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -226,14 +225,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -243,9 +242,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -253,12 +252,11 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -268,9 +266,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -278,11 +276,11 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -292,14 +290,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -309,14 +307,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -326,9 +324,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -336,12 +334,11 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -351,9 +348,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -361,11 +358,11 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -375,14 +372,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -392,14 +389,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -409,9 +406,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -419,12 +416,11 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -434,9 +430,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -444,11 +440,11 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -458,14 +454,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -475,14 +471,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -492,9 +488,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,12 +498,11 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -517,9 +512,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -527,11 +522,11 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -541,14 +536,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -558,14 +553,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -575,9 +570,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -585,12 +580,11 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -600,9 +594,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -610,11 +604,11 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -624,14 +618,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -641,14 +635,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -658,9 +652,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -668,12 +662,11 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -683,9 +676,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -693,11 +686,11 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -707,14 +700,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -724,14 +717,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -741,9 +734,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -751,12 +744,11 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -766,9 +758,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -776,11 +768,11 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -790,14 +782,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -807,14 +799,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -824,9 +816,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -834,12 +826,11 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -849,9 +840,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -859,11 +850,11 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -873,14 +864,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -890,14 +881,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -907,9 +898,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -917,12 +908,11 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -932,9 +922,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -942,11 +932,11 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -956,14 +946,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -973,14 +963,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -990,9 +980,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1000,12 +990,11 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1015,9 +1004,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1025,11 +1014,11 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1039,14 +1028,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -1056,14 +1045,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -1073,9 +1062,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1083,12 +1072,11 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1098,9 +1086,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1108,11 +1096,11 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1122,14 +1110,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -1139,14 +1127,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -1156,9 +1144,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1166,12 +1154,11 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1181,9 +1168,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1191,11 +1178,11 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1205,14 +1192,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -1222,14 +1209,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -1239,9 +1226,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1249,12 +1236,11 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1264,9 +1250,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1274,11 +1260,11 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -1288,14 +1274,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -1305,14 +1291,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1322,9 +1308,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1332,12 +1318,11 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1347,9 +1332,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1357,11 +1342,11 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
@@ -1371,14 +1356,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -1388,14 +1373,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1405,9 +1390,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1415,12 +1400,11 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1430,9 +1414,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1440,11 +1424,11 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
@@ -1454,14 +1438,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -1471,14 +1455,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1488,9 +1472,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1498,12 +1482,11 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1513,9 +1496,9 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1523,11 +1506,11 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1537,14 +1520,14 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -1554,14 +1537,14 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -1571,9 +1554,9 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1581,12 +1564,11 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1596,9 +1578,9 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1606,11 +1588,11 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1620,14 +1602,14 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -1637,14 +1619,14 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -1654,9 +1636,9 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1664,12 +1646,11 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1679,9 +1660,9 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1689,11 +1670,11 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1703,14 +1684,14 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -1720,14 +1701,14 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -1737,9 +1718,9 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1747,12 +1728,11 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1762,9 +1742,9 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1772,11 +1752,11 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1786,14 +1766,14 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -1803,14 +1783,14 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -1820,9 +1800,9 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1830,12 +1810,11 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1845,9 +1824,9 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1855,11 +1834,11 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1869,14 +1848,14 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -1886,14 +1865,14 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -1903,9 +1882,9 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1913,12 +1892,11 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1928,9 +1906,9 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1938,11 +1916,11 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1952,14 +1930,14 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -1969,14 +1947,14 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -1986,9 +1964,9 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1996,12 +1974,11 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2011,9 +1988,9 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -2021,11 +1998,11 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -2035,14 +2012,14 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2052,14 +2029,14 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -2069,9 +2046,9 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2079,12 +2056,11 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2094,9 +2070,9 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2104,11 +2080,11 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -2118,14 +2094,14 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2135,14 +2111,14 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -2152,9 +2128,9 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2162,12 +2138,11 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2177,9 +2152,9 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2187,11 +2162,11 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -2201,14 +2176,14 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2218,14 +2193,14 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -2235,9 +2210,9 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2245,12 +2220,11 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2260,9 +2234,9 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2270,11 +2244,11 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -2284,14 +2258,14 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2301,14 +2275,14 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -2318,9 +2292,9 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2328,12 +2302,11 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2343,9 +2316,9 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2353,11 +2326,11 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
@@ -2367,14 +2340,14 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2384,14 +2357,14 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -2401,9 +2374,9 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2411,12 +2384,11 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32,
-  i32,
-  i32
-);
+  iXLen,
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2426,9 +2398,9 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2,
-    i32 %3,
-    i32 1)
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2436,11 +2408,11 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
@@ -2450,14 +2422,14 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2467,14 +2439,14 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 9,
-    i32 %2,
-    i32 1)
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -2484,9 +2456,9 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }
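
Note on the iXLen placeholder seen throughout the merged files: the idea is that a single test body spells every XLEN-sized operand as iXLen, and the RUN lines substitute i32 or i64 before handing the file to llc, so the same CHECK lines cover both rv32 and rv64. A minimal sketch of such RUN lines, assuming the sed-based substitution pattern used by other merged RVV intrinsic tests (the exact RUN lines and -mattr flags in the new vslideup.ll/vslidedown.ll may differ):

; Hypothetical RUN-line header for a merged iXLen test; sed rewrites the
; placeholder and llc reads the result from stdin for each target.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

With that header, the generated assembly is identical for both triples, which is why the two per-target copies of these tests could be merged without changing any CHECK lines.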


        

