[llvm] 25ff302 - [RISCV] Split vrgather intrinsics into separate vrgather.vv and vrgather.vx intrinsics.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 4 20:25:48 PST 2021


Author: Craig Topper
Date: 2021-02-04T19:50:12-08:00
New Revision: 25ff302a79f1c27ad03894e74da096ecdbe4176a

URL: https://github.com/llvm/llvm-project/commit/25ff302a79f1c27ad03894e74da096ecdbe4176a
DIFF: https://github.com/llvm/llvm-project/commit/25ff302a79f1c27ad03894e74da096ecdbe4176a.diff

LOG: [RISCV] Split vrgather intrinsics into separate vrgather.vv and vrgather.vx intrinsics.

The vrgather.vv instruction uses a vector of indices with the same
SEW as operand 0. The vrgather.vx instruction uses a scalar index
operand of XLen bits.
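
For reference, the rv32 tests below end up declaring the two split
forms like this (on rv32 both XLen and vl are i32):

  declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i32)  ; vector of indices
  declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
    <vscale x 1 x i8>, i32, i32)                ; scalar XLen index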

By splitting this into two intrinsics we are able to use LLVMMatchType
in the definitions to avoid specifying the type for the index operand
when creating the IR for the intrinsic. For .vv it will match the
operand 0 type, and for .vx it will match the type of the vl operand
we already needed to specify a type for.
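
Concretely, for the nxv1i8 case in the rv32 tests this changes the
mangled names as follows:

  @llvm.riscv.vrgather.nxv1i8.nxv1i8  ->  @llvm.riscv.vrgather.vv.nxv1i8.i32
  @llvm.riscv.vrgather.nxv1i8.i32     ->  @llvm.riscv.vrgather.vx.nxv1i8.i32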

I'm considering splitting more intrinsics. This was a somewhat
odd one because the .vx form doesn't use the element type; it
always uses XLen.

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D95979

Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index c4056895f68e..b71ecddb0c30 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -231,12 +231,35 @@ let TargetPrefix = "riscv" in {
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first and second source vector.
-  // Input: (vector_in, vector_in, vl)
-  class RISCVBinaryAAAMask
+  // Input: (vector_in, int_vector_in, vl)
+  class RISCVRGatherVVNoMask
         : Intrinsic<[llvm_anyvector_ty],
-                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                    [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first and second source vector.
+  // Input: (vector_in, vector_in, int_vector_in, vl)
+  class RISCVRGatherVVMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first source vector, and the
+  // second operand is XLen.
+  // Input: (vector_in, xlen_in, vl)
+  class RISCVGatherVXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+  }
+  // For destination vector type is the same as first source vector (with mask).
+  // Second operand is XLen.
+  // Input: (maskedoff, vector_in, xlen_in, mask, vl)
+  class RISCVGatherVXMask
+       : Intrinsic<[llvm_anyvector_ty],
+                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                   [IntrNoMem]>, RISCVVIntrinsic {
+  }
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -688,6 +711,14 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
   }
+  multiclass RISCVRGatherVV {
+    def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
+  }
+  multiclass RISCVRGatherVX {
+    def "int_riscv_" # NAME : RISCVGatherVXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
+  }
   // ABX means the destination type(A) is different from the first source
   // type(B). X means any type for the second source operand.
   multiclass RISCVBinaryABX {
@@ -965,10 +996,11 @@ let TargetPrefix = "riscv" in {
   defm vfslide1up : RISCVBinaryAAX;
   defm vfslide1down : RISCVBinaryAAX;
 
-  defm vrgather : RISCVBinaryAAX;
+  defm vrgather_vv : RISCVRGatherVV;
+  defm vrgather_vx : RISCVRGatherVX;
   defm vrgatherei16 : RISCVBinaryAAX;
 
-  def "int_riscv_vcompress" : RISCVBinaryAAAMask;
+  def "int_riscv_vcompress" : RISCVUnaryAAMask;
 
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 06d5b4b62fb8..61e1576ac544 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2933,9 +2933,9 @@ multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
 multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist, Operand ImmType = simm5>
 {
-  defm "" : VPatBinaryV_VV_INT<intrinsic, instruction, vtilist>;
-  defm "" : VPatBinaryV_VX_INT<intrinsic, instruction, vtilist>;
-  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+  defm "" : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>;
+  defm "" : VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>;
+  defm "" : VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
 }
 
 multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index 4de81b8550d1..ebe7976c6d3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,7 +14,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -22,7 +22,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -36,7 +36,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
@@ -46,7 +46,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32);
@@ -59,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2)
@@ -67,7 +67,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
@@ -91,7 +91,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32);
@@ -104,7 +104,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2)
@@ -112,7 +112,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
@@ -126,7 +126,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
@@ -136,7 +136,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32);
@@ -149,7 +149,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -157,7 +157,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -181,7 +181,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32);
@@ -194,7 +194,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2)
@@ -202,7 +202,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
@@ -216,7 +216,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
@@ -226,7 +226,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32);
@@ -239,7 +239,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2)
@@ -247,7 +247,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
@@ -261,7 +261,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
@@ -271,7 +271,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32);
@@ -284,7 +284,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 %2)
@@ -292,7 +292,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
@@ -308,7 +308,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
@@ -318,7 +318,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32);
@@ -331,7 +331,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -339,7 +339,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
@@ -353,7 +353,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
@@ -363,7 +363,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32);
@@ -376,7 +376,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
@@ -384,7 +384,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
@@ -398,7 +398,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
@@ -408,7 +408,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32);
@@ -421,7 +421,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2)
@@ -429,7 +429,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -443,7 +443,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -453,7 +453,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32);
@@ -466,7 +466,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
@@ -474,7 +474,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
@@ -488,7 +488,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
@@ -498,7 +498,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32);
@@ -511,7 +511,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2)
@@ -519,7 +519,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
@@ -533,7 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
@@ -543,7 +543,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i32);
@@ -556,7 +556,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 %2)
@@ -564,7 +564,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
@@ -580,7 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
@@ -590,7 +590,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32);
@@ -603,7 +603,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2)
@@ -611,7 +611,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
@@ -625,7 +625,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
@@ -635,7 +635,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32);
@@ -648,7 +648,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2)
@@ -656,7 +656,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -670,7 +670,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32);
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2)
@@ -701,7 +701,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
@@ -715,7 +715,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
@@ -725,7 +725,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32);
@@ -738,7 +738,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2)
@@ -746,7 +746,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
@@ -760,7 +760,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
@@ -770,7 +770,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32);
@@ -783,7 +783,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2)
@@ -791,7 +791,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
@@ -807,7 +807,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
@@ -817,7 +817,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
   i32);
@@ -830,7 +830,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
     <vscale x 1 x half> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -838,7 +838,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
@@ -852,7 +852,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i16> %2,
@@ -862,7 +862,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
   i32);
@@ -875,7 +875,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
     <vscale x 2 x half> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
@@ -883,7 +883,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
@@ -897,7 +897,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i16> %2,
@@ -907,7 +907,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
   i32);
@@ -920,7 +920,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
     <vscale x 4 x half> %0,
     <vscale x 4 x i16> %1,
     i32 %2)
@@ -928,7 +928,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
@@ -942,7 +942,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i16> %2,
@@ -952,7 +952,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
   i32);
@@ -965,7 +965,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
     <vscale x 8 x half> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
@@ -973,7 +973,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
@@ -987,7 +987,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i16> %2,
@@ -997,7 +997,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
   i32);
@@ -1010,7 +1010,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
     <vscale x 16 x half> %0,
     <vscale x 16 x i16> %1,
     i32 %2)
@@ -1018,7 +1018,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
@@ -1032,7 +1032,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i16> %2,
@@ -1042,7 +1042,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
   i32);
@@ -1055,7 +1055,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
     <vscale x 32 x half> %0,
     <vscale x 32 x i16> %1,
     i32 %2)
@@ -1063,7 +1063,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
@@ -1079,7 +1079,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i16> %2,
@@ -1089,7 +1089,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
   i32);
@@ -1102,7 +1102,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
     <vscale x 1 x float> %0,
     <vscale x 1 x i32> %1,
     i32 %2)
@@ -1110,7 +1110,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
@@ -1124,7 +1124,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i32> %2,
@@ -1134,7 +1134,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
   i32);
@@ -1147,7 +1147,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
     <vscale x 2 x float> %0,
     <vscale x 2 x i32> %1,
     i32 %2)
@@ -1155,7 +1155,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
@@ -1169,7 +1169,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i32> %2,
@@ -1179,7 +1179,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
   i32);
@@ -1192,7 +1192,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
     <vscale x 4 x float> %0,
     <vscale x 4 x i32> %1,
     i32 %2)
@@ -1200,7 +1200,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
@@ -1214,7 +1214,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i32> %2,
@@ -1224,7 +1224,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
   i32);
@@ -1237,7 +1237,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
     <vscale x 8 x float> %0,
     <vscale x 8 x i32> %1,
     i32 %2)
@@ -1245,7 +1245,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
@@ -1259,7 +1259,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i32> %2,
@@ -1269,7 +1269,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
   i32);
@@ -1282,7 +1282,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
     <vscale x 16 x float> %0,
     <vscale x 16 x i32> %1,
     i32 %2)
@@ -1290,7 +1290,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
@@ -1306,7 +1306,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i32> %2,
@@ -1316,7 +1316,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
   i32);
@@ -1329,7 +1329,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
     <vscale x 1 x double> %0,
     <vscale x 1 x i64> %1,
     i32 %2)
@@ -1337,7 +1337,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
@@ -1351,7 +1351,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i64> %2,
@@ -1361,7 +1361,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
   i32);
@@ -1374,7 +1374,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
     <vscale x 2 x double> %0,
     <vscale x 2 x i64> %1,
     i32 %2)
@@ -1382,7 +1382,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
@@ -1396,7 +1396,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i64> %2,
@@ -1406,7 +1406,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
   i32);
@@ -1419,7 +1419,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
     <vscale x 4 x double> %0,
     <vscale x 4 x i64> %1,
     i32 %2)
@@ -1427,7 +1427,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
@@ -1441,7 +1441,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i64> %2,
@@ -1451,7 +1451,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
   i32);
@@ -1464,7 +1464,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
     <vscale x 8 x double> %0,
     <vscale x 8 x i64> %1,
     i32 %2)
@@ -1472,7 +1472,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
@@ -1488,7 +1488,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i64> %2,
@@ -1498,7 +1498,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i32(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
   <vscale x 1 x i8>,
   i32,
   i32);
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32(<vscale x 1 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i32(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1519,7 +1519,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i32(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32,
@@ -1533,7 +1533,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i32(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2,
@@ -1543,7 +1543,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i32(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
   <vscale x 2 x i8>,
   i32,
   i32);
@@ -1556,7 +1556,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32(<vscale x 2 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i32(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1564,7 +1564,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i32(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32,
@@ -1578,7 +1578,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i32(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2,
@@ -1588,7 +1588,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i32(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
   <vscale x 4 x i8>,
   i32,
   i32);
@@ -1601,7 +1601,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32(<vscale x 4 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i32(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1609,7 +1609,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i32(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32,
@@ -1623,7 +1623,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i32(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2,
@@ -1633,7 +1633,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i32(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
   <vscale x 8 x i8>,
   i32,
   i32);
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32(<vscale x 8 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i32(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1654,7 +1654,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i32(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32,
@@ -1668,7 +1668,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i32(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2,
@@ -1678,7 +1678,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i32(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
   <vscale x 16 x i8>,
   i32,
   i32);
@@ -1691,7 +1691,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32(<vscale x 1
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i32(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1699,7 +1699,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i32(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32,
@@ -1713,7 +1713,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i32(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2,
@@ -1723,7 +1723,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i32(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
   <vscale x 32 x i8>,
   i32,
   i32);
@@ -1736,7 +1736,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32(<vscale x 3
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i32(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1744,7 +1744,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i32(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32,
@@ -1758,7 +1758,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i32(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2,
@@ -1768,7 +1768,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i32(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
   <vscale x 64 x i8>,
   i32,
   i32);
@@ -1781,7 +1781,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32(<vscale x 6
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i32(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     i32 %1,
     i32 %2)
@@ -1789,7 +1789,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i32(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32,
@@ -1803,7 +1803,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i32(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 %2,
@@ -1813,7 +1813,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i32(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -1826,7 +1826,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i32(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1834,7 +1834,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i32(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32,
@@ -1848,7 +1848,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i32(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2,
@@ -1858,7 +1858,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i32(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
   <vscale x 2 x i16>,
   i32,
   i32);
@@ -1871,7 +1871,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i32(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1879,7 +1879,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i32(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32,
@@ -1893,7 +1893,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i32(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2,
@@ -1903,7 +1903,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i32(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
   <vscale x 4 x i16>,
   i32,
   i32);
@@ -1916,7 +1916,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i32(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1924,7 +1924,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i32(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32,
@@ -1938,7 +1938,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i32(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2,
@@ -1948,7 +1948,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i32(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
   <vscale x 8 x i16>,
   i32,
   i32);
@@ -1961,7 +1961,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32(<vscale x 8
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i32(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1969,7 +1969,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i32(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32,
@@ -1983,7 +1983,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i32(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2,
@@ -1993,7 +1993,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i32(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
   <vscale x 16 x i16>,
   i32,
   i32);
@@ -2006,7 +2006,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i32(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     i32 %1,
     i32 %2)
@@ -2014,7 +2014,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i32(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32,
@@ -2028,7 +2028,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i32(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2,
@@ -2038,7 +2038,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i32(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
   <vscale x 32 x i16>,
   i32,
   i32);
@@ -2051,7 +2051,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i32(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     i32 %1,
     i32 %2)
@@ -2059,7 +2059,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i32(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i32,
@@ -2073,7 +2073,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i32(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 %2,
@@ -2083,7 +2083,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   i32);
@@ -2096,7 +2096,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -2104,7 +2104,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
@@ -2118,7 +2118,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
@@ -2128,7 +2128,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   i32);
@@ -2141,7 +2141,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -2149,7 +2149,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
@@ -2163,7 +2163,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
@@ -2173,7 +2173,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   i32);
@@ -2186,7 +2186,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -2194,7 +2194,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
@@ -2208,7 +2208,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
@@ -2218,7 +2218,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   i32);
@@ -2231,7 +2231,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -2239,7 +2239,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
@@ -2253,7 +2253,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
@@ -2263,7 +2263,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   i32);
@@ -2276,7 +2276,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -2284,7 +2284,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2,
@@ -2308,7 +2308,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i32(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
   <vscale x 1 x half>,
   i32,
   i32);
@@ -2321,7 +2321,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i32(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
     i32 %1,
     i32 %2)
@@ -2329,7 +2329,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i32(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32,
@@ -2343,7 +2343,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i32(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2,
@@ -2353,7 +2353,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i32(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
   <vscale x 2 x half>,
   i32,
   i32);
@@ -2366,7 +2366,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i32(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
     i32 %1,
     i32 %2)
@@ -2374,7 +2374,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i32(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32,
@@ -2388,7 +2388,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i32(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2,
@@ -2398,7 +2398,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i32(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
   <vscale x 4 x half>,
   i32,
   i32);
@@ -2411,7 +2411,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i32(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
     i32 %1,
     i32 %2)
@@ -2419,7 +2419,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i32(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32,
@@ -2433,7 +2433,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i32(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2,
@@ -2443,7 +2443,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i32(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
   <vscale x 8 x half>,
   i32,
   i32);
@@ -2456,7 +2456,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i32(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
     i32 %1,
     i32 %2)
@@ -2464,7 +2464,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i32(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32,
@@ -2478,7 +2478,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i32(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2,
@@ -2488,7 +2488,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i32(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
   <vscale x 16 x half>,
   i32,
   i32);
@@ -2501,7 +2501,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i32(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
     i32 %1,
     i32 %2)
@@ -2509,7 +2509,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i32(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32,
@@ -2523,7 +2523,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32(<v
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i32(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2,
@@ -2533,7 +2533,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i32(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
   <vscale x 32 x half>,
   i32,
   i32);
@@ -2546,7 +2546,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i32(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
     i32 %1,
     i32 %2)
@@ -2554,7 +2554,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i32(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32,
@@ -2568,7 +2568,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32(<v
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i32(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2,
@@ -2578,7 +2578,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
   <vscale x 1 x float>,
   i32,
   i32);
@@ -2591,7 +2591,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
     i32 %1,
     i32 %2)
@@ -2599,7 +2599,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32,
@@ -2613,7 +2613,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2,
@@ -2623,7 +2623,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
   <vscale x 2 x float>,
   i32,
   i32);
@@ -2636,7 +2636,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
     i32 %1,
     i32 %2)
@@ -2644,7 +2644,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32,
@@ -2658,7 +2658,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2,
@@ -2668,7 +2668,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
   <vscale x 4 x float>,
   i32,
   i32);
@@ -2681,7 +2681,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
     i32 %1,
     i32 %2)
@@ -2689,7 +2689,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32,
@@ -2703,7 +2703,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2,
@@ -2713,7 +2713,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
   <vscale x 8 x float>,
   i32,
   i32);
@@ -2726,7 +2726,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
     i32 %1,
     i32 %2)
@@ -2734,7 +2734,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32,
@@ -2748,7 +2748,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2,
@@ -2758,7 +2758,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
   <vscale x 16 x float>,
   i32,
   i32);
@@ -2771,7 +2771,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscal
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
     i32 %1,
     i32 %2)
@@ -2779,7 +2779,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32,
@@ -2793,7 +2793,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2,
@@ -2803,7 +2803,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i32(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
   <vscale x 1 x double>,
   i32,
   i32);
@@ -2816,7 +2816,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
     i32 %1,
     i32 %2)
@@ -2824,7 +2824,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i32(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i32,
@@ -2838,7 +2838,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 %2,
@@ -2848,7 +2848,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i32(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
   <vscale x 2 x double>,
   i32,
   i32);
@@ -2861,7 +2861,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
     i32 %1,
     i32 %2)
@@ -2869,7 +2869,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i32(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i32,
@@ -2883,7 +2883,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 %2,
@@ -2893,7 +2893,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i32(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
   <vscale x 4 x double>,
   i32,
   i32);
@@ -2906,7 +2906,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
     i32 %1,
     i32 %2)
@@ -2914,7 +2914,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i32(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i32,
@@ -2928,7 +2928,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 %2,
@@ -2938,7 +2938,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i32(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
   <vscale x 8 x double>,
   i32,
   i32);
@@ -2951,7 +2951,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
     i32 %1,
     i32 %2)
@@ -2959,7 +2959,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i32(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   i32,
@@ -2973,7 +2973,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     i32 %2,
@@ -2991,7 +2991,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32(<vscale x 1 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i32(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     i32 9,
     i32 %1)
@@ -3006,7 +3006,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i32(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 9,
@@ -3024,7 +3024,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32(<vscale x 2 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i32(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     i32 9,
     i32 %1)
@@ -3039,7 +3039,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i32(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 9,
@@ -3057,7 +3057,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32(<vscale x 4 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i32(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     i32 9,
     i32 %1)
@@ -3072,7 +3072,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i32(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 9,
@@ -3090,7 +3090,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32(<vscale x 8 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i32(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     i32 9,
     i32 %1)
@@ -3105,7 +3105,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i32(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 9,
@@ -3123,7 +3123,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32(<vscale x 1
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i32(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     i32 9,
     i32 %1)
@@ -3138,7 +3138,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i32(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 9,
@@ -3156,7 +3156,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32(<vscale x 3
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i32(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     i32 9,
     i32 %1)
@@ -3171,7 +3171,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i32(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 9,
@@ -3189,7 +3189,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32(<vscale x 6
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i32(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     i32 9,
     i32 %1)
@@ -3204,7 +3204,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i32(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 9,
@@ -3222,7 +3222,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i32(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
@@ -3237,7 +3237,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i32(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 9,
@@ -3255,7 +3255,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i32(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     i32 9,
     i32 %1)
@@ -3270,7 +3270,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i32(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 9,
@@ -3288,7 +3288,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i32(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     i32 9,
     i32 %1)
@@ -3303,7 +3303,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i32(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 9,
@@ -3321,7 +3321,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32(<vscale x 8
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i32(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     i32 9,
     i32 %1)
@@ -3336,7 +3336,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i32(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 9,
@@ -3354,7 +3354,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i32(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     i32 9,
     i32 %1)
@@ -3369,7 +3369,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i32(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 9,
@@ -3387,7 +3387,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i32(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     i32 9,
     i32 %1)
@@ -3402,7 +3402,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i32(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 9,
@@ -3420,7 +3420,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -3435,7 +3435,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
@@ -3453,7 +3453,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -3468,7 +3468,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
@@ -3486,7 +3486,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -3501,7 +3501,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
@@ -3519,7 +3519,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -3534,7 +3534,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
@@ -3552,7 +3552,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -3567,7 +3567,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 9,
@@ -3585,7 +3585,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i32(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
     i32 9,
     i32 %1)
@@ -3600,7 +3600,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i32(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 9,
@@ -3618,7 +3618,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i32(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
     i32 9,
     i32 %1)
@@ -3633,7 +3633,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i32(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 9,
@@ -3651,7 +3651,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i32(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
     i32 9,
     i32 %1)
@@ -3666,7 +3666,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i32(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 9,
@@ -3684,7 +3684,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i32(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
     i32 9,
     i32 %1)
@@ -3699,7 +3699,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i32(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 9,
@@ -3717,7 +3717,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i32(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
     i32 9,
     i32 %1)
@@ -3732,7 +3732,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32(<v
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i32(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 9,
@@ -3750,7 +3750,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i32(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
     i32 9,
     i32 %1)
@@ -3765,7 +3765,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32(<v
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i32(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 9,
@@ -3783,7 +3783,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
     i32 9,
     i32 %1)
@@ -3798,7 +3798,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 9,
@@ -3816,7 +3816,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
     i32 9,
     i32 %1)
@@ -3831,7 +3831,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 9,
@@ -3849,7 +3849,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
     i32 9,
     i32 %1)
@@ -3864,7 +3864,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 9,
@@ -3882,7 +3882,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
     i32 9,
     i32 %1)
@@ -3897,7 +3897,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 9,
@@ -3915,7 +3915,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscal
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
     i32 9,
     i32 %1)
@@ -3930,7 +3930,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 9,
@@ -3948,7 +3948,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
     i32 9,
     i32 %1)
@@ -3963,7 +3963,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 9,
@@ -3981,7 +3981,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
     i32 9,
     i32 %1)
@@ -3996,7 +3996,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 9,
@@ -4014,7 +4014,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
     i32 9,
     i32 %1)
@@ -4029,7 +4029,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 9,
@@ -4047,7 +4047,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
     i32 9,
     i32 %1)
@@ -4062,7 +4062,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     i32 9,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index 7bd3ed67f61f..2686c1bdccac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,7 +14,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -22,7 +22,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -36,7 +36,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
@@ -46,7 +46,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64);
@@ -59,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2)
@@ -67,7 +67,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
@@ -91,7 +91,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64);
@@ -104,7 +104,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2)
@@ -112,7 +112,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
@@ -126,7 +126,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
@@ -136,7 +136,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64);
@@ -149,7 +149,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2)
@@ -157,7 +157,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -181,7 +181,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64);
@@ -194,7 +194,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2)
@@ -202,7 +202,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
@@ -216,7 +216,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
@@ -226,7 +226,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64);
@@ -239,7 +239,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2)
@@ -247,7 +247,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
@@ -261,7 +261,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
@@ -271,7 +271,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i64);
@@ -284,7 +284,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i64 %2)
@@ -292,7 +292,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
@@ -308,7 +308,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<v
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
@@ -318,7 +318,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64);
@@ -331,7 +331,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
@@ -339,7 +339,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
@@ -353,7 +353,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
@@ -363,7 +363,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64);
@@ -376,7 +376,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2)
@@ -384,7 +384,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
@@ -398,7 +398,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
@@ -408,7 +408,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64);
@@ -421,7 +421,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2)
@@ -429,7 +429,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -443,7 +443,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -453,7 +453,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64);
@@ -466,7 +466,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2)
@@ -474,7 +474,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
@@ -488,7 +488,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
@@ -498,7 +498,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64);
@@ -511,7 +511,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2)
@@ -519,7 +519,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
@@ -533,7 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
@@ -543,7 +543,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i64);
@@ -556,7 +556,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i64 %2)
@@ -564,7 +564,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
@@ -580,7 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
@@ -590,7 +590,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64);
@@ -603,7 +603,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2)
@@ -611,7 +611,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
@@ -625,7 +625,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
@@ -635,7 +635,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64);
@@ -648,7 +648,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2)
@@ -656,7 +656,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -670,7 +670,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64);
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2)
@@ -701,7 +701,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
@@ -715,7 +715,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
@@ -725,7 +725,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64);
@@ -738,7 +738,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2)
@@ -746,7 +746,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
@@ -760,7 +760,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
@@ -770,7 +770,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i64);
@@ -783,7 +783,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i64 %2)
@@ -791,7 +791,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
@@ -807,7 +807,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
@@ -817,7 +817,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.nxv1i64(
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64);
@@ -830,7 +830,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.nxv1i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2)
@@ -838,7 +838,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -852,7 +852,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -862,7 +862,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64);
@@ -875,7 +875,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2)
@@ -883,7 +883,7 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
@@ -897,7 +897,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<v
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
@@ -907,7 +907,7 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64);
@@ -920,7 +920,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2)
@@ -928,7 +928,7 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
@@ -942,7 +942,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<v
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
@@ -952,7 +952,7 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64);
@@ -965,7 +965,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2)
@@ -973,7 +973,7 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64(
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
@@ -989,7 +989,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<v
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
@@ -999,7 +999,7 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
   i64);
@@ -1012,7 +1012,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
     <vscale x 1 x half> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
@@ -1020,7 +1020,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
@@ -1034,7 +1034,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i16> %2,
@@ -1044,7 +1044,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
   i64);
@@ -1057,7 +1057,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
     <vscale x 2 x half> %0,
     <vscale x 2 x i16> %1,
     i64 %2)
@@ -1065,7 +1065,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
@@ -1079,7 +1079,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i16> %2,
@@ -1089,7 +1089,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
   i64);
@@ -1102,7 +1102,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
     <vscale x 4 x half> %0,
     <vscale x 4 x i16> %1,
     i64 %2)
@@ -1110,7 +1110,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
@@ -1124,7 +1124,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i16> %2,
@@ -1134,7 +1134,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
   i64);
@@ -1147,7 +1147,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
     <vscale x 8 x half> %0,
     <vscale x 8 x i16> %1,
     i64 %2)
@@ -1155,7 +1155,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i16>,
@@ -1169,7 +1169,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i16> %2,
@@ -1179,7 +1179,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
   i64);
@@ -1192,7 +1192,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
     <vscale x 16 x half> %0,
     <vscale x 16 x i16> %1,
     i64 %2)
@@ -1200,7 +1200,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i16>,
@@ -1214,7 +1214,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i16> %2,
@@ -1224,7 +1224,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
   i64);
@@ -1237,7 +1237,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
     <vscale x 32 x half> %0,
     <vscale x 32 x i16> %1,
     i64 %2)
@@ -1245,7 +1245,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i16>,
@@ -1261,7 +1261,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i16> %2,
@@ -1271,7 +1271,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
   i64);
@@ -1284,7 +1284,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
     <vscale x 1 x float> %0,
     <vscale x 1 x i32> %1,
     i64 %2)
@@ -1292,7 +1292,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i32>,
@@ -1306,7 +1306,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i32> %2,
@@ -1316,7 +1316,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
   i64);
@@ -1329,7 +1329,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
     <vscale x 2 x float> %0,
     <vscale x 2 x i32> %1,
     i64 %2)
@@ -1337,7 +1337,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i32>,
@@ -1351,7 +1351,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i32> %2,
@@ -1361,7 +1361,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
   i64);
@@ -1374,7 +1374,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
     <vscale x 4 x float> %0,
     <vscale x 4 x i32> %1,
     i64 %2)
@@ -1382,7 +1382,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i32>,
@@ -1396,7 +1396,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i32> %2,
@@ -1406,7 +1406,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
   i64);
@@ -1419,7 +1419,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
     <vscale x 8 x float> %0,
     <vscale x 8 x i32> %1,
     i64 %2)
@@ -1427,7 +1427,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i32>,
@@ -1441,7 +1441,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i32> %2,
@@ -1451,7 +1451,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
   i64);
@@ -1464,7 +1464,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
     <vscale x 16 x float> %0,
     <vscale x 16 x i32> %1,
     i64 %2)
@@ -1472,7 +1472,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i32>,
@@ -1488,7 +1488,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i32> %2,
@@ -1498,7 +1498,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
   i64);
@@ -1511,7 +1511,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
     <vscale x 1 x double> %0,
     <vscale x 1 x i64> %1,
     i64 %2)
@@ -1519,7 +1519,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i64>,
@@ -1533,7 +1533,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i64> %2,
@@ -1543,7 +1543,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
   i64);
@@ -1556,7 +1556,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
     <vscale x 2 x double> %0,
     <vscale x 2 x i64> %1,
     i64 %2)
@@ -1564,7 +1564,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i64>,
@@ -1578,7 +1578,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i64> %2,
@@ -1588,7 +1588,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
   i64);
@@ -1601,7 +1601,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
     <vscale x 4 x double> %0,
     <vscale x 4 x i64> %1,
     i64 %2)
@@ -1609,7 +1609,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i64>,
@@ -1623,7 +1623,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i64> %2,
@@ -1633,7 +1633,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
   i64);
@@ -1646,7 +1646,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
     <vscale x 8 x double> %0,
     <vscale x 8 x i64> %1,
     i64 %2)
@@ -1654,7 +1654,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i64>,
@@ -1670,7 +1670,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i64> %2,
@@ -1680,7 +1680,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i64(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
   <vscale x 1 x i8>,
   i64,
   i64);
@@ -1693,7 +1693,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i64(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1701,7 +1701,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i64(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
@@ -1715,7 +1715,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i64(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2,
@@ -1725,7 +1725,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i64(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
   <vscale x 2 x i8>,
   i64,
   i64);
@@ -1738,7 +1738,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i64(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1746,7 +1746,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i64(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64,
@@ -1760,7 +1760,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i64(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2,
@@ -1770,7 +1770,7 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i64(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
   <vscale x 4 x i8>,
   i64,
   i64);
@@ -1783,7 +1783,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64(<vscale x 4 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i64(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1791,7 +1791,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i64(
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64,
@@ -1805,7 +1805,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i64(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2,
@@ -1815,7 +1815,7 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i64(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
   <vscale x 8 x i8>,
   i64,
   i64);
@@ -1828,7 +1828,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64(<vscale x 8 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i64(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1836,7 +1836,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i64(
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64,
@@ -1850,7 +1850,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i64(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2,
@@ -1860,7 +1860,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i64(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
   <vscale x 16 x i8>,
   i64,
   i64);
@@ -1873,7 +1873,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64(<vscale x 1
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i64(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1881,7 +1881,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i64(
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64,
@@ -1895,7 +1895,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i64(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2,
@@ -1905,7 +1905,7 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i64(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
   <vscale x 32 x i8>,
   i64,
   i64);
@@ -1918,7 +1918,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64(<vscale x 3
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i64(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1926,7 +1926,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i64(
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64,
@@ -1940,7 +1940,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i64(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2,
@@ -1950,7 +1950,7 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i64(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
   <vscale x 64 x i8>,
   i64,
   i64);
@@ -1963,7 +1963,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64(<vscale x 6
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i64(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     i64 %1,
     i64 %2)
@@ -1971,7 +1971,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i64(
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i64,
@@ -1985,7 +1985,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i64(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i64 %2,
@@ -1995,7 +1995,7 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i64(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
   <vscale x 1 x i16>,
   i64,
   i64);
@@ -2008,7 +2008,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i64(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2016,7 +2016,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i64(
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64,
@@ -2030,7 +2030,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i64(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2,
@@ -2040,7 +2040,7 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i64(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
   <vscale x 2 x i16>,
   i64,
   i64);
@@ -2053,7 +2053,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i64(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2061,7 +2061,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i64(
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64,
@@ -2075,7 +2075,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i64(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2,
@@ -2085,7 +2085,7 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i64(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
   <vscale x 4 x i16>,
   i64,
   i64);
@@ -2098,7 +2098,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i64(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2106,7 +2106,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i64(
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64,
@@ -2120,7 +2120,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i64(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2,
@@ -2130,7 +2130,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i64(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
   <vscale x 8 x i16>,
   i64,
   i64);
@@ -2143,7 +2143,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64(<vscale x 8
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i64(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2151,7 +2151,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i64(
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64,
@@ -2165,7 +2165,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i64(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2,
@@ -2175,7 +2175,7 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i64(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
   <vscale x 16 x i16>,
   i64,
   i64);
@@ -2188,7 +2188,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i64(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2196,7 +2196,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i64(
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64,
@@ -2210,7 +2210,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i64(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2,
@@ -2220,7 +2220,7 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i64(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
   <vscale x 32 x i16>,
   i64,
   i64);
@@ -2233,7 +2233,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i64(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     i64 %1,
     i64 %2)
@@ -2241,7 +2241,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i64(
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i64,
@@ -2255,7 +2255,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i64(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i64 %2,
@@ -2265,7 +2265,7 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i64(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
   <vscale x 1 x i32>,
   i64,
   i64);
@@ -2278,7 +2278,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i64(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     i64 %1,
     i64 %2)
@@ -2286,7 +2286,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i64(
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64,
@@ -2300,7 +2300,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i64(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2,
@@ -2310,7 +2310,7 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i64(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
   <vscale x 2 x i32>,
   i64,
   i64);
@@ -2323,7 +2323,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i64(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     i64 %1,
     i64 %2)
@@ -2331,7 +2331,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i64(
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64,
@@ -2345,7 +2345,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i64(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2,
@@ -2355,7 +2355,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i64(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
   <vscale x 4 x i32>,
   i64,
   i64);
@@ -2368,7 +2368,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64(<vscale x 4
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i64(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     i64 %1,
     i64 %2)
@@ -2376,7 +2376,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i64(
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64,
@@ -2390,7 +2390,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i64(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2,
@@ -2400,7 +2400,7 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i64(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
   <vscale x 8 x i32>,
   i64,
   i64);
@@ -2413,7 +2413,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64(<vscale x 8
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i64(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     i64 %1,
     i64 %2)
@@ -2421,7 +2421,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i64(
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64,
@@ -2435,7 +2435,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i64(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2,
@@ -2445,7 +2445,7 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i64(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
   <vscale x 16 x i32>,
   i64,
   i64);
@@ -2458,7 +2458,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i64(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     i64 %1,
     i64 %2)
@@ -2466,7 +2466,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i64(
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i64,
@@ -2480,7 +2480,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i64(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i64 %2,
@@ -2490,7 +2490,7 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   i64);
@@ -2503,7 +2503,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -2511,7 +2511,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
@@ -2525,7 +2525,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
@@ -2535,7 +2535,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   i64);
@@ -2548,7 +2548,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -2556,7 +2556,7 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
@@ -2570,7 +2570,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
@@ -2580,7 +2580,7 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   i64);
@@ -2593,7 +2593,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -2601,7 +2601,7 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
@@ -2615,7 +2615,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
@@ -2625,7 +2625,7 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   i64);
@@ -2638,7 +2638,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -2646,7 +2646,7 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
@@ -2660,7 +2660,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
@@ -2670,7 +2670,7 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i64(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
   <vscale x 1 x half>,
   i64,
   i64);
@@ -2683,7 +2683,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i64(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
     i64 %1,
     i64 %2)
@@ -2691,7 +2691,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i64(
+declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64,
@@ -2705,7 +2705,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i64(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2,
@@ -2715,7 +2715,7 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i64(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
   <vscale x 2 x half>,
   i64,
   i64);
@@ -2728,7 +2728,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i64(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
     i64 %1,
     i64 %2)
@@ -2736,7 +2736,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i64(
+declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64,
@@ -2750,7 +2750,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i64(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2,
@@ -2760,7 +2760,7 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i64(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
   <vscale x 4 x half>,
   i64,
   i64);
@@ -2773,7 +2773,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i64(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
     i64 %1,
     i64 %2)
@@ -2781,7 +2781,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i64(
+declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64,
@@ -2795,7 +2795,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i64(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2,
@@ -2805,7 +2805,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i64(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
   <vscale x 8 x half>,
   i64,
   i64);
@@ -2818,7 +2818,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i64(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
     i64 %1,
     i64 %2)
@@ -2826,7 +2826,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i64(
+declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64,
@@ -2840,7 +2840,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i64(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2,
@@ -2850,7 +2850,7 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i64(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
   <vscale x 16 x half>,
   i64,
   i64);
@@ -2863,7 +2863,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i64(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
     i64 %1,
     i64 %2)
@@ -2871,7 +2871,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i64(
+declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64,
@@ -2885,7 +2885,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64(<v
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i64(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2,
@@ -2895,7 +2895,7 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i64(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
   <vscale x 32 x half>,
   i64,
   i64);
@@ -2908,7 +2908,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i64(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
     i64 %1,
     i64 %2)
@@ -2916,7 +2916,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i64(
+declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i64,
@@ -2930,7 +2930,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64(<v
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i64(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i64 %2,
@@ -2940,7 +2940,7 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i64(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
   <vscale x 1 x float>,
   i64,
   i64);
@@ -2953,7 +2953,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i64(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
     i64 %1,
     i64 %2)
@@ -2961,7 +2961,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i64(
+declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64,
@@ -2975,7 +2975,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i64(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2,
@@ -2985,7 +2985,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i64(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
   <vscale x 2 x float>,
   i64,
   i64);
@@ -2998,7 +2998,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i64(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
     i64 %1,
     i64 %2)
@@ -3006,7 +3006,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i64(
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64,
@@ -3020,7 +3020,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i64(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2,
@@ -3030,7 +3030,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i64(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
   <vscale x 4 x float>,
   i64,
   i64);
@@ -3043,7 +3043,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i64(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
     i64 %1,
     i64 %2)
@@ -3051,7 +3051,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i64(
+declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64,
@@ -3065,7 +3065,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i64(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2,
@@ -3075,7 +3075,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i64(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
   <vscale x 8 x float>,
   i64,
   i64);
@@ -3088,7 +3088,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64(<vscale x
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i64(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
     i64 %1,
     i64 %2)
@@ -3096,7 +3096,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i64(
+declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64,
@@ -3110,7 +3110,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i64(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2,
@@ -3120,7 +3120,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i64(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
   <vscale x 16 x float>,
   i64,
   i64);
@@ -3133,7 +3133,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64(<vscal
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i64(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
     i64 %1,
     i64 %2)
@@ -3141,7 +3141,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i64(
+declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i64,
@@ -3155,7 +3155,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64(<
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i64(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i64 %2,
@@ -3165,7 +3165,7 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
   <vscale x 1 x double>,
   i64,
   i64);
@@ -3178,7 +3178,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
     i64 %1,
     i64 %2)
@@ -3186,7 +3186,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i64,
@@ -3200,7 +3200,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 %2,
@@ -3210,7 +3210,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
   <vscale x 2 x double>,
   i64,
   i64);
@@ -3223,7 +3223,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
     i64 %1,
     i64 %2)
@@ -3231,7 +3231,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i64,
@@ -3245,7 +3245,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 %2,
@@ -3255,7 +3255,7 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
   <vscale x 4 x double>,
   i64,
   i64);
@@ -3268,7 +3268,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
     i64 %1,
     i64 %2)
@@ -3276,7 +3276,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i64,
@@ -3290,7 +3290,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 %2,
@@ -3300,7 +3300,7 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
   <vscale x 8 x double>,
   i64,
   i64);
@@ -3313,7 +3313,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
     i64 %1,
     i64 %2)
@@ -3321,7 +3321,7 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   i64,
@@ -3335,7 +3335,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     i64 %2,
@@ -3353,7 +3353,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i64(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     i64 9,
     i64 %1)
@@ -3368,7 +3368,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i64(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 9,
@@ -3386,7 +3386,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64(<vscale x 2 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i64(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     i64 9,
     i64 %1)
@@ -3401,7 +3401,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i64(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 9,
@@ -3419,7 +3419,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64(<vscale x 4 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i64(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     i64 9,
     i64 %1)
@@ -3434,7 +3434,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i64(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 9,
@@ -3452,7 +3452,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64(<vscale x 8 x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i64(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     i64 9,
     i64 %1)
@@ -3467,7 +3467,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64(<vscale x
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i64(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 9,
@@ -3485,7 +3485,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64(<vscale x 1
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i64(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     i64 9,
     i64 %1)
@@ -3500,7 +3500,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i64(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 9,
@@ -3518,7 +3518,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64(<vscale x 3
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i64(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     i64 9,
     i64 %1)
@@ -3533,7 +3533,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i64(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 9,
@@ -3551,7 +3551,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64(<vscale x 6
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i64(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     i64 9,
     i64 %1)
@@ -3566,7 +3566,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i64(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i64 9,
@@ -3584,7 +3584,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i64(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     i64 9,
     i64 %1)
@@ -3599,7 +3599,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i64(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 9,
@@ -3617,7 +3617,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i64(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     i64 9,
     i64 %1)
@@ -3632,7 +3632,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i64(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 9,
@@ -3650,7 +3650,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64(<vscale x 4
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i64(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     i64 9,
     i64 %1)
@@ -3665,7 +3665,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i64(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 9,
@@ -3683,7 +3683,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64(<vscale x 8
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i64(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     i64 9,
     i64 %1)
@@ -3698,7 +3698,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i64(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 9,
@@ -3716,7 +3716,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i64(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     i64 9,
     i64 %1)
@@ -3731,7 +3731,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i64(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 9,
@@ -3749,7 +3749,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i64(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     i64 9,
     i64 %1)
@@ -3764,7 +3764,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i64(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i64 9,
@@ -3782,7 +3782,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i64(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     i64 9,
     i64 %1)
@@ -3797,7 +3797,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i64(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 9,
@@ -3815,7 +3815,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64(<vscale x 2
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i64(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     i64 9,
     i64 %1)
@@ -3830,7 +3830,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i64(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 9,
@@ -3848,7 +3848,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64(<vscale x 4
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i64(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     i64 9,
     i64 %1)
@@ -3863,7 +3863,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i64(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 9,
@@ -3881,7 +3881,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64(<vscale x 8
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i64(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     i64 9,
     i64 %1)
@@ -3896,7 +3896,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i64(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 9,
@@ -3914,7 +3914,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i64(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     i64 9,
     i64 %1)
@@ -3929,7 +3929,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i64(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i64 9,
@@ -3947,7 +3947,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -3962,7 +3962,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
@@ -3980,7 +3980,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -3995,7 +3995,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
@@ -4013,7 +4013,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -4028,7 +4028,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
@@ -4046,7 +4046,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
@@ -4061,7 +4061,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 9,
@@ -4079,7 +4079,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i64(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
     i64 9,
     i64 %1)
@@ -4094,7 +4094,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i64(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 9,
@@ -4112,7 +4112,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i64(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
     i64 9,
     i64 %1)
@@ -4127,7 +4127,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i64(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 9,
@@ -4145,7 +4145,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i64(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
     i64 9,
     i64 %1)
@@ -4160,7 +4160,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i64(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 9,
@@ -4178,7 +4178,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i64(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
     i64 9,
     i64 %1)
@@ -4193,7 +4193,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64(<vsca
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i64(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 9,
@@ -4211,7 +4211,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i64(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
     i64 9,
     i64 %1)
@@ -4226,7 +4226,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64(<v
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i64(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 9,
@@ -4244,7 +4244,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i64(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
     i64 9,
     i64 %1)
@@ -4259,7 +4259,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64(<v
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i64(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i64 9,
@@ -4277,7 +4277,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i64(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
     i64 9,
     i64 %1)
@@ -4292,7 +4292,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i64(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 9,
@@ -4310,7 +4310,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64(<vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i64(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
     i64 9,
     i64 %1)
@@ -4325,7 +4325,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i64(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 9,
@@ -4343,7 +4343,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64(<vscale x
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i64(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
     i64 9,
     i64 %1)
@@ -4358,7 +4358,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i64(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 9,
@@ -4376,7 +4376,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64(<vscale x
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i64(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
     i64 9,
     i64 %1)
@@ -4391,7 +4391,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64(<vsc
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i64(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 9,
@@ -4409,7 +4409,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64(<vscal
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i64(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
     i64 9,
     i64 %1)
@@ -4424,7 +4424,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64(<
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i64(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i64 9,
@@ -4442,7 +4442,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
     i64 9,
     i64 %1)
@@ -4457,7 +4457,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 9,
@@ -4475,7 +4475,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
     i64 9,
     i64 %1)
@@ -4490,7 +4490,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 9,
@@ -4508,7 +4508,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
     i64 9,
     i64 %1)
@@ -4523,7 +4523,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 9,
@@ -4541,7 +4541,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
     i64 9,
     i64 %1)
@@ -4556,7 +4556,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vs
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     i64 9,
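
A minimal standalone sketch (not part of the patch), reconstructing the unmasked .vx call shape exercised by the RV64 tests above; the function name @example and the operand names %src and %vl are illustrative only:

; The scalar index and the vl operand are both XLen (i64 on RV64), so a
; single extra integer type (i64) appears in the mangled intrinsic name.
declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
  <vscale x 1 x i64>, i64, i64)

define <vscale x 1 x i64> @example(<vscale x 1 x i64> %src, i64 %vl) {
entry:
  ; Gather from %src using the constant scalar index 9, for %vl elements.
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
    <vscale x 1 x i64> %src,
    i64 9,
    i64 %vl)
  ret <vscale x 1 x i64> %a
}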