[llvm] 9998b00 - [RISCV] Update RVV shift intrinsic tests to use XLEN bit as shift amount.

Zakk Chen via llvm-commits <llvm-commits at lists.llvm.org>
Wed Mar 17 10:48:10 PDT 2021


Author: Zakk Chen
Date: 2021-03-17T10:47:49-07:00
New Revision: 9998b00c2ecd480c54d3fe806c4011439e41b065

URL: https://github.com/llvm/llvm-project/commit/9998b00c2ecd480c54d3fe806c4011439e41b065
DIFF: https://github.com/llvm/llvm-project/commit/9998b00c2ecd480c54d3fe806c4011439e41b065.diff

LOG: [RISCV] Update RVV shift intrinsic tests to use XLEN bit as shift amount.

Fix the unexpected use of op1's element type as the shift amount type.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D98501
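
For reference, a minimal before/after sketch of the change, taken from the vnclip RV32 test in the diff below (the other shift intrinsic tests follow the same pattern, with i64 as the scalar type on RV64):

  ; Before: the scalar shift amount was typed as an element-sized integer (i8 here).
  declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
    <vscale x 1 x i16>,
    i8,
    i32);

  ; After: the scalar shift amount is an XLEN-sized integer (i32 on RV32, i64 on RV64).
  declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
    <vscale x 1 x i16>,
    i32,
    i32);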

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index faafb67c43b9..c59e3656c5cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -496,858 +496,858 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index 88d41f660048..9a9644ea46c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -676,1170 +676,1170 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
   <vscale x 1 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
   <vscale x 2 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
   <vscale x 4 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
   <vscale x 8 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index d0ad6e3d1cb8..4ab2aa7c1681 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -496,858 +496,858 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i16> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index 3d943da5af51..fd21d8b90bc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -676,1170 +676,1170 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
   <vscale x 1 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
   <vscale x 2 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
   <vscale x 4 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
   <vscale x 8 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 5c3f1b95fb85..8d3df148a05c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -496,1352 +496,860 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsra.wv v25, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vnsra.wv v25, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vnsra.wv v26, v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vnsra.wv v28, v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsra.wx v25, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vnsra.wx v25, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vnsra.wx v26, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vnsra.wx v28, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i16> %a
 }
-
-define <vscale x 1 x i32> @intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsra.wi v25, v8, 9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vnsra.wi v25, v8, 9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vnsra.wi v26, v8, 9
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vnsra.wi v28, v8, 9
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index df274711cabb..30f11f8f7cc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -676,1170 +676,1170 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
   <vscale x 1 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
   <vscale x 2 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
   <vscale x 4 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
   <vscale x 8 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index ceeb0bf80ba6..8774a8949122 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -496,1352 +496,860 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsrl.wv v25, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vnsrl.wv v25, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vnsrl.wv v26, v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vnsrl.wv v28, v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsrl.wx v25, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vnsrl.wx v25, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vnsrl.wx v26, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vnsrl.wx v28, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
   ret <vscale x 16 x i16> %a
 }
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vnsrl.wi v25, v8, 9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vnsrl.wi v25, v8, 9
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vnsrl.wi v26, v8, 9
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vnsrl.wi v28, v8, 9
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index 27a1b59df81e..d049ca6c59b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -676,1170 +676,1170 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
   <vscale x 2 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
   <vscale x 4 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
   <vscale x 8 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
   <vscale x 16 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
   <vscale x 32 x i16>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
   <vscale x 1 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
   <vscale x 2 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
   <vscale x 4 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
   <vscale x 8 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
   <vscale x 16 x i32>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
   <vscale x 1 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
   <vscale x 2 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
   <vscale x 4 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
   <vscale x 8 x i64>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8:
+define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8:
+define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8:
+define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8:
+define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8:
+define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8:
+define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16:
+define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16:
+define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16:
+define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16:
+define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16:
+define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32:
+define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32:
+define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32:
+define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32:
+define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
index 0e2be1ad194f..a4427aab309f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -796,591 +796,591 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i32,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 64 x i1> %3,
     i32 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1388,21 +1388,21 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -1412,19 +1412,19 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1432,21 +1432,21 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -1456,19 +1456,19 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1476,21 +1476,21 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -1500,19 +1500,19 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1520,21 +1520,21 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -1544,19 +1544,19 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
   <vscale x 16 x i32>,
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1564,21 +1564,21 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -1595,9 +1595,9 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1610,10 +1610,10 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1627,9 +1627,9 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1642,10 +1642,10 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1659,9 +1659,9 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
@@ -1674,10 +1674,10 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1691,9 +1691,9 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
@@ -1706,10 +1706,10 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1723,9 +1723,9 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
@@ -1738,10 +1738,10 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1755,9 +1755,9 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
@@ -1770,10 +1770,10 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1787,9 +1787,9 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -1802,10 +1802,10 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -1819,9 +1819,9 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
@@ -1834,10 +1834,10 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1851,9 +1851,9 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
@@ -1866,10 +1866,10 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1883,9 +1883,9 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
@@ -1898,10 +1898,10 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1915,9 +1915,9 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
@@ -1930,10 +1930,10 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1947,9 +1947,9 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
@@ -1962,10 +1962,10 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1979,9 +1979,9 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
@@ -1994,10 +1994,10 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2011,7 +2011,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2026,7 +2026,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -2043,7 +2043,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2058,7 +2058,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -2075,7 +2075,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2090,7 +2090,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -2107,7 +2107,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2122,7 +2122,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -2139,7 +2139,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2154,7 +2154,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
index f0356fb8100c..9e6c6255f9a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -973,811 +973,811 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i64,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 64 x i1> %3,
     i64 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
   <vscale x 1 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
   <vscale x 2 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
   <vscale x 4 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
   <vscale x 8 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
   <vscale x 16 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1785,21 +1785,21 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -1809,19 +1809,19 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1829,21 +1829,21 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -1853,19 +1853,19 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1873,21 +1873,21 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -1897,19 +1897,19 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
   <vscale x 8 x i64>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1917,21 +1917,21 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -1948,9 +1948,9 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1963,10 +1963,10 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -1980,9 +1980,9 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1995,10 +1995,10 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2012,9 +2012,9 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2027,10 +2027,10 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2044,9 +2044,9 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2059,10 +2059,10 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2076,9 +2076,9 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2091,10 +2091,10 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2108,9 +2108,9 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2123,10 +2123,10 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2140,9 +2140,9 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2155,10 +2155,10 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 64 x i1> %2,
     i64 %3)
 
@@ -2172,9 +2172,9 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2187,10 +2187,10 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2204,9 +2204,9 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2219,10 +2219,10 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2236,9 +2236,9 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2251,10 +2251,10 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2268,9 +2268,9 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2283,10 +2283,10 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2300,9 +2300,9 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2315,10 +2315,10 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2332,9 +2332,9 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2347,10 +2347,10 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2364,9 +2364,9 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
@@ -2379,10 +2379,10 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2396,9 +2396,9 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
@@ -2411,10 +2411,10 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2428,9 +2428,9 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
@@ -2443,10 +2443,10 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2460,9 +2460,9 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
@@ -2475,10 +2475,10 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2492,9 +2492,9 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
@@ -2507,10 +2507,10 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2524,7 +2524,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2539,7 +2539,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -2556,7 +2556,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2571,7 +2571,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -2588,7 +2588,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2603,7 +2603,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -2620,7 +2620,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,

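For reference, a minimal standalone sketch of the updated calling convention, condensed from the vsll hunks above: the scalar shift amount is now passed as the XLEN integer type (i64 in the rv64 tests, i32 in the rv32 tests that follow) instead of the vector element type. The function name @sketch_vsll_vx is illustrative only; the declaration and call match the forms used in the tests.

  declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(<vscale x 2 x i32>, i64, i64)

  define <vscale x 2 x i32> @sketch_vsll_vx(<vscale x 2 x i32> %v, i64 %shamt, i64 %vl) nounwind {
    ; On RV64 both the shift amount %shamt and the vector length %vl are XLEN-sized (i64).
    %r = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
        <vscale x 2 x i32> %v,
        i64 %shamt,
        i64 %vl)
    ret <vscale x 2 x i32> %r
  }
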
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 1a48652e1863..1fe52e37a984 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -796,591 +796,591 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i32,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 64 x i1> %3,
     i32 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1388,21 +1388,21 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -1412,19 +1412,19 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1432,21 +1432,21 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -1456,19 +1456,19 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1476,21 +1476,21 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -1500,19 +1500,19 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1520,21 +1520,21 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -1544,19 +1544,19 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
   <vscale x 16 x i32>,
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1564,21 +1564,21 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -1595,9 +1595,9 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1610,10 +1610,10 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1627,9 +1627,9 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1642,10 +1642,10 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1659,9 +1659,9 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
@@ -1674,10 +1674,10 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1691,9 +1691,9 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
@@ -1706,10 +1706,10 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1723,9 +1723,9 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
@@ -1738,10 +1738,10 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1755,9 +1755,9 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
@@ -1770,10 +1770,10 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1787,9 +1787,9 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -1802,10 +1802,10 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -1819,9 +1819,9 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
@@ -1834,10 +1834,10 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1851,9 +1851,9 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
@@ -1866,10 +1866,10 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1883,9 +1883,9 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
@@ -1898,10 +1898,10 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1915,9 +1915,9 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
@@ -1930,10 +1930,10 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1947,9 +1947,9 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
@@ -1962,10 +1962,10 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1979,9 +1979,9 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
@@ -1994,10 +1994,10 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2011,7 +2011,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2026,7 +2026,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -2043,7 +2043,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2058,7 +2058,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -2075,7 +2075,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2090,7 +2090,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -2107,7 +2107,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2122,7 +2122,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -2139,7 +2139,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2154,7 +2154,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 7c1f4cf78776..3e078673e18c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -973,811 +973,811 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i64,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 64 x i1> %3,
     i64 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
   <vscale x 1 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
   <vscale x 2 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
   <vscale x 4 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
   <vscale x 8 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
   <vscale x 16 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1785,21 +1785,21 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -1809,19 +1809,19 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1829,21 +1829,21 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -1853,19 +1853,19 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1873,21 +1873,21 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -1897,19 +1897,19 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
   <vscale x 8 x i64>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1917,21 +1917,21 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -1948,9 +1948,9 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1963,10 +1963,10 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -1980,9 +1980,9 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1995,10 +1995,10 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2012,9 +2012,9 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2027,10 +2027,10 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2044,9 +2044,9 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2059,10 +2059,10 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2076,9 +2076,9 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2091,10 +2091,10 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2108,9 +2108,9 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2123,10 +2123,10 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2140,9 +2140,9 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2155,10 +2155,10 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 64 x i1> %2,
     i64 %3)
 
@@ -2172,9 +2172,9 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2187,10 +2187,10 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2204,9 +2204,9 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2219,10 +2219,10 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2236,9 +2236,9 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2251,10 +2251,10 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2268,9 +2268,9 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2283,10 +2283,10 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2300,9 +2300,9 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2315,10 +2315,10 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2332,9 +2332,9 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2347,10 +2347,10 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2364,9 +2364,9 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
@@ -2379,10 +2379,10 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2396,9 +2396,9 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
@@ -2411,10 +2411,10 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2428,9 +2428,9 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
@@ -2443,10 +2443,10 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2460,9 +2460,9 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
@@ -2475,10 +2475,10 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2492,9 +2492,9 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
@@ -2507,10 +2507,10 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2524,7 +2524,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2539,7 +2539,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -2556,7 +2556,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2571,7 +2571,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -2588,7 +2588,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2603,7 +2603,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -2620,7 +2620,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index eba2d4bd383b..5ad8dea25c01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -796,591 +796,591 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i32,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 64 x i1> %3,
     i32 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1388,21 +1388,21 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -1412,19 +1412,19 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1432,21 +1432,21 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -1456,19 +1456,19 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1476,21 +1476,21 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -1500,19 +1500,19 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1520,21 +1520,21 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -1544,19 +1544,19 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
   <vscale x 16 x i32>,
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1564,21 +1564,21 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -1595,9 +1595,9 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1610,10 +1610,10 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1627,9 +1627,9 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1642,10 +1642,10 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1659,9 +1659,9 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
@@ -1674,10 +1674,10 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1691,9 +1691,9 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
@@ -1706,10 +1706,10 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1723,9 +1723,9 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
@@ -1738,10 +1738,10 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1755,9 +1755,9 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
@@ -1770,10 +1770,10 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1787,9 +1787,9 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -1802,10 +1802,10 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -1819,9 +1819,9 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
@@ -1834,10 +1834,10 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1851,9 +1851,9 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
@@ -1866,10 +1866,10 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1883,9 +1883,9 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
@@ -1898,10 +1898,10 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1915,9 +1915,9 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
@@ -1930,10 +1930,10 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1947,9 +1947,9 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
@@ -1962,10 +1962,10 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1979,9 +1979,9 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
@@ -1994,10 +1994,10 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2011,7 +2011,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2026,7 +2026,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -2043,7 +2043,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2058,7 +2058,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -2075,7 +2075,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2090,7 +2090,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -2107,7 +2107,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2122,7 +2122,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -2139,7 +2139,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2154,7 +2154,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index 58e6ef3d730c..e32a30a12254 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -973,811 +973,811 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i64,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 64 x i1> %3,
     i64 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
   <vscale x 1 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
   <vscale x 2 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
   <vscale x 4 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
   <vscale x 8 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
   <vscale x 16 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1785,21 +1785,21 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -1809,19 +1809,19 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1829,21 +1829,21 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -1853,19 +1853,19 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1873,21 +1873,21 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -1897,19 +1897,19 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
   <vscale x 8 x i64>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1917,21 +1917,21 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -1948,9 +1948,9 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1963,10 +1963,10 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -1980,9 +1980,9 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1995,10 +1995,10 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2012,9 +2012,9 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2027,10 +2027,10 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2044,9 +2044,9 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2059,10 +2059,10 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2076,9 +2076,9 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2091,10 +2091,10 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2108,9 +2108,9 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2123,10 +2123,10 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2140,9 +2140,9 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2155,10 +2155,10 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 64 x i1> %2,
     i64 %3)
 
@@ -2172,9 +2172,9 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2187,10 +2187,10 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2204,9 +2204,9 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2219,10 +2219,10 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2236,9 +2236,9 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2251,10 +2251,10 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2268,9 +2268,9 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2283,10 +2283,10 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2300,9 +2300,9 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2315,10 +2315,10 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2332,9 +2332,9 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2347,10 +2347,10 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2364,9 +2364,9 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
@@ -2379,10 +2379,10 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2396,9 +2396,9 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
@@ -2411,10 +2411,10 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2428,9 +2428,9 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
@@ -2443,10 +2443,10 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2460,9 +2460,9 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
@@ -2475,10 +2475,10 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2492,9 +2492,9 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
@@ -2507,10 +2507,10 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2524,7 +2524,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2539,7 +2539,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -2556,7 +2556,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2571,7 +2571,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -2588,7 +2588,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2603,7 +2603,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -2620,7 +2620,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 549e1cfd4691..945b72f957e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -796,768 +796,591 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i32,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 64 x i1> %3,
     i32 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1565,21 +1388,21 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -1589,19 +1412,19 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1609,21 +1432,21 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -1633,19 +1456,19 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1653,21 +1476,21 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -1677,19 +1500,19 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1697,21 +1520,21 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -1721,19 +1544,19 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
   <vscale x 16 x i32>,
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1741,21 +1564,21 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -1765,246 +1588,176 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
   <vscale x 1 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v25, v25, a1
-; CHECK-NEXT:    vmv.v.x v26, a0
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vor.vv v25, v26, v25
-; CHECK-NEXT:    vssra.vv v8, v8, v25
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v25, v25, a1
-; CHECK-NEXT:    vmv.v.x v26, a0
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vor.vv v25, v26, v25
-; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v9, v25, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
   <vscale x 2 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vmv.v.x v28, a0
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vor.vv v26, v28, v26
-; CHECK-NEXT:    vssra.vv v8, v8, v26
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vmv.v.x v28, a0
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vor.vv v26, v28, v26
-; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v10, v26, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
   <vscale x 4 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
-; CHECK-NEXT:    vmv.v.x v28, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vmv.v.x v12, a0
-; CHECK-NEXT:    vsll.vx v12, v12, a1
-; CHECK-NEXT:    vsrl.vx v12, v12, a1
-; CHECK-NEXT:    vor.vv v28, v12, v28
-; CHECK-NEXT:    vssra.vv v8, v8, v28
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
-; CHECK-NEXT:    vmv.v.x v28, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vmv.v.x v16, a0
-; CHECK-NEXT:    vsll.vx v16, v16, a1
-; CHECK-NEXT:    vsrl.vx v16, v16, a1
-; CHECK-NEXT:    vor.vv v28, v16, v28
-; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
-; CHECK-NEXT:    vssra.vv v8, v12, v28, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
   <vscale x 8 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
-; CHECK-NEXT:    vmv.v.x v16, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v16, v16, a1
-; CHECK-NEXT:    vmv.v.x v24, a0
-; CHECK-NEXT:    vsll.vx v24, v24, a1
-; CHECK-NEXT:    vsrl.vx v24, v24, a1
-; CHECK-NEXT:    vor.vv v16, v24, v16
-; CHECK-NEXT:    vssra.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    csrrs a3, vlenb, zero
-; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, sp, 16
-; CHECK-NEXT:    vs1r.v v0, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
-; CHECK-NEXT:    vmv.v.x v24, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v0, v24, a1
-; CHECK-NEXT:    vmv.v.x v24, a0
-; CHECK-NEXT:    vsll.vx v24, v24, a1
-; CHECK-NEXT:    vsrl.vx v24, v24, a1
-; CHECK-NEXT:    vor.vv v24, v24, v0
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl1re8.v v0, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    csrrs a0, vlenb, zero
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
@@ -2018,9 +1771,9 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
@@ -2033,10 +1786,10 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -2050,9 +1803,9 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
@@ -2065,10 +1818,10 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -2082,9 +1835,9 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2097,10 +1850,10 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -2114,9 +1867,9 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2129,10 +1882,10 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -2146,9 +1899,9 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2161,10 +1914,10 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -2178,9 +1931,9 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2193,10 +1946,10 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2210,9 +1963,9 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2225,10 +1978,10 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -2242,9 +1995,9 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2257,10 +2010,10 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -2274,9 +2027,9 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2289,10 +2042,10 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -2306,9 +2059,9 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2321,10 +2074,10 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -2338,9 +2091,9 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2353,10 +2106,10 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -2370,9 +2123,9 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2385,10 +2138,10 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -2402,9 +2155,9 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2417,10 +2170,10 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2434,7 +2187,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2449,7 +2202,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -2466,7 +2219,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2481,7 +2234,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -2498,7 +2251,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2513,7 +2266,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -2530,7 +2283,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2545,7 +2298,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -2562,7 +2315,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2577,7 +2330,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,
@@ -2586,131 +2339,3 @@ entry:
 
   ret <vscale x 16 x i32> %a
 }
-
-define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
-; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
-; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
-; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
-; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index 6aaef72bc7fa..a7ac6f16fd80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -973,811 +973,811 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i64,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 64 x i1> %3,
     i64 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
   <vscale x 1 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
   <vscale x 2 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
   <vscale x 4 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
   <vscale x 8 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
   <vscale x 16 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1785,21 +1785,21 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -1809,19 +1809,19 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1829,21 +1829,21 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -1853,19 +1853,19 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1873,21 +1873,21 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -1897,19 +1897,19 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
   <vscale x 8 x i64>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1917,21 +1917,21 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -1948,9 +1948,9 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1963,10 +1963,10 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -1980,9 +1980,9 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1995,10 +1995,10 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2012,9 +2012,9 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2027,10 +2027,10 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2044,9 +2044,9 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2059,10 +2059,10 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2076,9 +2076,9 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2091,10 +2091,10 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2108,9 +2108,9 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2123,10 +2123,10 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2140,9 +2140,9 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2155,10 +2155,10 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 64 x i1> %2,
     i64 %3)
 
@@ -2172,9 +2172,9 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2187,10 +2187,10 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2204,9 +2204,9 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2219,10 +2219,10 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2236,9 +2236,9 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2251,10 +2251,10 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2268,9 +2268,9 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2283,10 +2283,10 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2300,9 +2300,9 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2315,10 +2315,10 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2332,9 +2332,9 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2347,10 +2347,10 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2364,9 +2364,9 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
@@ -2379,10 +2379,10 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2396,9 +2396,9 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
@@ -2411,10 +2411,10 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2428,9 +2428,9 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
@@ -2443,10 +2443,10 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2460,9 +2460,9 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
@@ -2475,10 +2475,10 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2492,9 +2492,9 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
@@ -2507,10 +2507,10 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2524,7 +2524,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2539,7 +2539,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -2556,7 +2556,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2571,7 +2571,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -2588,7 +2588,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2603,7 +2603,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -2620,7 +2620,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index e3fb735a064d..67b0e77e0ca6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -796,768 +796,591 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i32,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i32,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i32 %2,
     <vscale x 64 x i1> %3,
     i32 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 16 x i1> %3,
     i32 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i32,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i32,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i32 %2,
     <vscale x 32 x i1> %3,
     i32 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1565,21 +1388,21 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -1589,19 +1412,19 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1609,21 +1432,21 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -1633,19 +1456,19 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1653,21 +1476,21 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -1677,19 +1500,19 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1697,21 +1520,21 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -1721,19 +1544,19 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
   <vscale x 16 x i32>,
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1741,21 +1564,21 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -1765,246 +1588,176 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
   <vscale x 1 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v25, v25, a1
-; CHECK-NEXT:    vmv.v.x v26, a0
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vor.vv v25, v26, v25
-; CHECK-NEXT:    vssrl.vv v8, v8, v25
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
+  i32,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v25, v25, a1
-; CHECK-NEXT:    vmv.v.x v26, a0
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vor.vv v25, v26, v25
-; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v9, v25, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 1 x i1> %3,
     i32 %4)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
   <vscale x 2 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vmv.v.x v28, a0
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vor.vv v26, v28, v26
-; CHECK-NEXT:    vssrl.vv v8, v8, v26
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
+  i32,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v26, v26, a1
-; CHECK-NEXT:    vmv.v.x v28, a0
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vor.vv v26, v28, v26
-; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v10, v26, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 2 x i1> %3,
     i32 %4)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
   <vscale x 4 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
-; CHECK-NEXT:    vmv.v.x v28, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vmv.v.x v12, a0
-; CHECK-NEXT:    vsll.vx v12, v12, a1
-; CHECK-NEXT:    vsrl.vx v12, v12, a1
-; CHECK-NEXT:    vor.vv v28, v12, v28
-; CHECK-NEXT:    vssrl.vv v8, v8, v28
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
+  i32,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
-; CHECK-NEXT:    vmv.v.x v28, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v28, v28, a1
-; CHECK-NEXT:    vmv.v.x v16, a0
-; CHECK-NEXT:    vsll.vx v16, v16, a1
-; CHECK-NEXT:    vsrl.vx v16, v16, a1
-; CHECK-NEXT:    vor.vv v28, v16, v28
-; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
-; CHECK-NEXT:    vssrl.vv v8, v12, v28, v0.t
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 4 x i1> %3,
     i32 %4)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
   <vscale x 8 x i64>,
-  i64,
+  i32,
   i32);
 
-define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
-; CHECK-NEXT:    vmv.v.x v16, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v16, v16, a1
-; CHECK-NEXT:    vmv.v.x v24, a0
-; CHECK-NEXT:    vsll.vx v24, v24, a1
-; CHECK-NEXT:    vsrl.vx v24, v24, a1
-; CHECK-NEXT:    vor.vv v16, v24, v16
-; CHECK-NEXT:    vssrl.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
-    i64 %1,
+    i32 %1,
     i32 %2)
 
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64,
+  i32,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    csrrs a3, vlenb, zero
-; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, sp, 16
-; CHECK-NEXT:    vs1r.v v0, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
-; CHECK-NEXT:    vmv.v.x v24, a1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsll.vx v0, v24, a1
-; CHECK-NEXT:    vmv.v.x v24, a0
-; CHECK-NEXT:    vsll.vx v24, v24, a1
-; CHECK-NEXT:    vsrl.vx v24, v24, a1
-; CHECK-NEXT:    vor.vv v24, v24, v0
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl1re8.v v0, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    csrrs a0, vlenb, zero
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2,
+    i32 %2,
     <vscale x 8 x i1> %3,
     i32 %4)
 
@@ -2018,9 +1771,9 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
@@ -2033,10 +1786,10 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -2050,9 +1803,9 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
@@ -2065,10 +1818,10 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -2082,9 +1835,9 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2097,10 +1850,10 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -2114,9 +1867,9 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2129,10 +1882,10 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -2146,9 +1899,9 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2161,10 +1914,10 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -2178,9 +1931,9 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2193,10 +1946,10 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2210,9 +1963,9 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2225,10 +1978,10 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i32 9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -2242,9 +1995,9 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2257,10 +2010,10 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -2274,9 +2027,9 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2289,10 +2042,10 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -2306,9 +2059,9 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2321,10 +2074,10 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -2338,9 +2091,9 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2353,10 +2106,10 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -2370,9 +2123,9 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2385,10 +2138,10 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -2402,9 +2155,9 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i32 9,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2417,10 +2170,10 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i32 9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -2434,7 +2187,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2449,7 +2202,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -2466,7 +2219,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2481,7 +2234,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -2498,7 +2251,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2513,7 +2266,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -2530,7 +2283,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2545,7 +2298,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -2562,7 +2315,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2577,7 +2330,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,
@@ -2586,131 +2339,3 @@ entry:
 
   ret <vscale x 16 x i32> %a
 }
-
-define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
-; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
-; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
-; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
-; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}

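(Note on the vssrl hunks above and below: each one applies the same mechanical rewrite — the scalar shift-amount operand moves from the source element type to the XLEN integer type (i32 in the rv32 file above, i64 in the rv64 file below), the element-type suffix is dropped from the intrinsic name, and the rv32 i64-element immediate tests just removed have no XLEN-typed replacement in that file. A minimal sketch of the updated rv64 form, with purely illustrative names (@example_vssrl_vx, %v, %shift, %vl); the actual coverage is in the tests themselves:

  ; Before this change the shift amount was typed as the element type
  ; (shown only for contrast, not valid after the update):
  ;   declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(<vscale x 1 x i16>, i16, i64)

  ; After: the shift amount is XLEN-sized (i64 on rv64) and the suffix is gone.
  declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
    <vscale x 1 x i16>,
    i64,
    i64);

  define <vscale x 1 x i16> @example_vssrl_vx(<vscale x 1 x i16> %v, i64 %shift, i64 %vl) nounwind {
  entry:
    ; Shift amount comes from a GPR; the trailing i64 operand is the vector length.
    %r = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
      <vscale x 1 x i16> %v,
      i64 %shift,
      i64 %vl)
    ret <vscale x 1 x i16> %r
  })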
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index 369519f72cae..d6305809cb54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -973,811 +973,811 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i8,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
   <vscale x 4 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i8,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8:
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
   <vscale x 8 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i8,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8:
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
   <vscale x 16 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i8,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8:
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
   <vscale x 32 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i8,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8:
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
   <vscale x 64 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i8,
+  i64,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8:
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 64 x i1> %3,
     i64 %4)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
   <vscale x 1 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i16,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16:
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
   <vscale x 2 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i16,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16:
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
   <vscale x 4 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i16,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16:
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
   <vscale x 8 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i16,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16:
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
   <vscale x 16 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i16,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16:
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
   <vscale x 32 x i16>,
-  i16,
+  i64,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i16,
+  i64,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16:
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 %2,
+    i64 %2,
     <vscale x 32 x i1> %3,
     i64 %4)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
   <vscale x 1 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32:
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
   <vscale x 2 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 2 x i1> %3,
     i64 %4)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
   <vscale x 4 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 4 x i1> %3,
     i64 %4)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
   <vscale x 8 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 8 x i1> %3,
     i64 %4)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
   <vscale x 16 x i32>,
-  i32,
+  i64,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  i64,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    i64 %2,
     <vscale x 16 x i1> %3,
     i64 %4)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1785,21 +1785,21 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64:
+define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -1809,19 +1809,19 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1829,21 +1829,21 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64:
+define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -1853,19 +1853,19 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1873,21 +1873,21 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64:
+define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -1897,19 +1897,19 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
   <vscale x 8 x i64>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -1917,21 +1917,21 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64:
+define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -1948,9 +1948,9 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
@@ -1963,10 +1963,10 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -1980,9 +1980,9 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
@@ -1995,10 +1995,10 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2012,9 +2012,9 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
@@ -2027,10 +2027,10 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2044,9 +2044,9 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
@@ -2059,10 +2059,10 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2076,9 +2076,9 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
@@ -2091,10 +2091,10 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2108,9 +2108,9 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
@@ -2123,10 +2123,10 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2140,9 +2140,9 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
@@ -2155,10 +2155,10 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i64 9,
     <vscale x 64 x i1> %2,
     i64 %3)
 
@@ -2172,9 +2172,9 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
@@ -2187,10 +2187,10 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2204,9 +2204,9 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
@@ -2219,10 +2219,10 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2236,9 +2236,9 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
@@ -2251,10 +2251,10 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2268,9 +2268,9 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
@@ -2283,10 +2283,10 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2300,9 +2300,9 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
@@ -2315,10 +2315,10 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2332,9 +2332,9 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
@@ -2347,10 +2347,10 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)
 
@@ -2364,9 +2364,9 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
@@ -2379,10 +2379,10 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)
 
@@ -2396,9 +2396,9 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
@@ -2411,10 +2411,10 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)
 
@@ -2428,9 +2428,9 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
@@ -2443,10 +2443,10 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)
 
@@ -2460,9 +2460,9 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
@@ -2475,10 +2475,10 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)
 
@@ -2492,9 +2492,9 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
@@ -2507,10 +2507,10 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
 
@@ -2524,7 +2524,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2539,7 +2539,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -2556,7 +2556,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2571,7 +2571,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -2588,7 +2588,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2603,7 +2603,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -2620,7 +2620,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,