[llvm] 89680f2 - [llvm][CodeGen] Rename SVE gather prefetch intrinsics. [NFC]

Francesco Petrogalli via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 15 13:49:38 PDT 2020


Author: Francesco Petrogalli
Date: 2020-04-15T21:49:16+01:00
New Revision: 89680f25e854fe73b95c9f493c073ed1c070e42c

URL: https://github.com/llvm/llvm-project/commit/89680f25e854fe73b95c9f493c073ed1c070e42c
DIFF: https://github.com/llvm/llvm-project/commit/89680f25e854fe73b95c9f493c073ed1c070e42c.diff

LOG: [llvm][CodeGen] Rename SVE gather prefetch intrinsics. [NFC]

Summary:
The renaming is needed to make the naming scheme uniform with the other
SVE gather/scatter load/store intrinsics.

Variable and function names have also been adapted to make it explicit
whether we are dealing with a scalar offset (which is unscaled) or an
index (which is scaled according to the data type of the lanes of the
vector).
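
For example, for the prfb variants (the calls below are taken from the
tests in this patch; the other element sizes follow the same pattern):

  ; Scalar base + vector of 32-bit indexes, zero-extended and scaled by the
  ; element size. Old name: llvm.aarch64.sve.prfb.gather.scaled.uxtw.*
  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)

  ; Vector of bases + scalar offset, which is unscaled.
  ; Old name: llvm.aarch64.sve.prfb.gather.*
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)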

Reviewers: andwar, sdesmalen, rengolin

Reviewed By: andwar

Subscribers: tschuett, hiraditya, arphaman, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77839

Added: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll

Removed: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 90ceed72780d..c32fd480d95d 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1277,7 +1277,7 @@ class AdvSIMD_ScatterStore_VS_Intrinsic
                [IntrWriteMem, IntrArgMemOnly]>;
 
 
-class SVE_gather_prf_scalar_base_vector_offset_scaled
+class SVE_gather_prf_SV
     : Intrinsic<[],
                 [
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
@@ -1287,7 +1287,7 @@ class SVE_gather_prf_scalar_base_vector_offset_scaled
                 ],
                 [IntrInaccessibleMemOrArgMemOnly, NoCapture<1>, ImmArg<3>]>;
 
-class SVE_gather_prf_vector_base_scalar_offset
+class SVE_gather_prf_VS
     : Intrinsic<[],
                 [
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
@@ -1328,29 +1328,29 @@ def int_aarch64_sve_prf
 
 // Scalar + 32-bit scaled offset vector, zero extend, packed and
 // unpacked.
-def int_aarch64_sve_prfb_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfh_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfw_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfd_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV;
 
 // Scalar + 32-bit scaled offset vector, sign extend, packed and
 // unpacked.
-def int_aarch64_sve_prfb_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfw_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfh_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfd_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV;
 
 // Scalar + 64-bit scaled offset vector.
-def int_aarch64_sve_prfb_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfh_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfw_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_prfd_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV;
 
 // Vector + scalar.
-def int_aarch64_sve_prfb_gather : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_prfh_gather : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_prfw_gather : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_prfd_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS;
 
 //
 // Scalar to vector operations

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c06c20d86249..6311137d3ef2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13019,9 +13019,9 @@ static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
 }
 
-/// Combines a node carrying the intrinsic `aarch64_sve_prf_gather<T>` into a
-/// node that uses `aarch64_sve_prf_gather<T>_scaled_uxtw` when the scalar
-/// offset passed to `aarch64_sve_prf_gather<T>` is not a valid immediate for
+/// Combines a node carrying the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset`
+/// into a node that uses `aarch64_sve_prf<T>_gather_uxtw_index` when the scalar
+/// offset passed to `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for
 /// the sve gather prefetch instruction with vector plus immediate addressing
 /// mode.
 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
@@ -13107,30 +13107,30 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::INTRINSIC_VOID:
   case ISD::INTRINSIC_W_CHAIN:
     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
-    case Intrinsic::aarch64_sve_prfb_gather:
+    case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfb_gather_uxtw_index,
           1 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_prfh_gather:
+    case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfh_gather_uxtw_index,
           2 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_prfw_gather:
+    case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfw_gather_uxtw_index,
           4 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_prfd_gather:
+    case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfd_gather_uxtw_index,
           8 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw:
-    case Intrinsic::aarch64_sve_prfb_gather_scaled_sxtw:
-    case Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw:
-    case Intrinsic::aarch64_sve_prfh_gather_scaled_sxtw:
-    case Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw:
-    case Intrinsic::aarch64_sve_prfw_gather_scaled_sxtw:
-    case Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw:
-    case Intrinsic::aarch64_sve_prfd_gather_scaled_sxtw:
+    case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
+    case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
+    case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
+    case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
+    case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
+    case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
+    case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
+    case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
       return legalizeSVEGatherPrefetchOffsVec(N, DAG);
     case Intrinsic::aarch64_neon_ld2:
     case Intrinsic::aarch64_neon_ld3:

diff  --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 0583e4bcc1c7..87b2ed464303 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -880,37 +880,37 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
 
   // Gather prefetch using scaled 32-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.s, uxtw #1]
-  defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only,  ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
-  defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16,     ZPR32ExtUXTW16,    int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
-  defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32,     ZPR32ExtUXTW32,    int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
-  defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64,     ZPR32ExtUXTW64,    int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
+  defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only,  ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_sxtw_index, int_aarch64_sve_prfb_gather_uxtw_index>;
+  defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16,     ZPR32ExtUXTW16,    int_aarch64_sve_prfh_gather_sxtw_index, int_aarch64_sve_prfh_gather_uxtw_index>;
+  defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32,     ZPR32ExtUXTW32,    int_aarch64_sve_prfw_gather_sxtw_index, int_aarch64_sve_prfw_gather_uxtw_index>;
+  defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64,     ZPR32ExtUXTW64,    int_aarch64_sve_prfd_gather_sxtw_index, int_aarch64_sve_prfd_gather_uxtw_index>;
 
   // Gather prefetch using unpacked, scaled 32-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.d, uxtw #1]
-  defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
-  defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16,    ZPR64ExtUXTW16,    int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
-  defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32,    ZPR64ExtUXTW32,    int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
-  defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64,    ZPR64ExtUXTW64,    int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
+  defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_sxtw_index, int_aarch64_sve_prfb_gather_uxtw_index>;
+  defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16,    ZPR64ExtUXTW16,    int_aarch64_sve_prfh_gather_sxtw_index, int_aarch64_sve_prfh_gather_uxtw_index>;
+  defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32,    ZPR64ExtUXTW32,    int_aarch64_sve_prfw_gather_sxtw_index, int_aarch64_sve_prfw_gather_uxtw_index>;
+  defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64,    ZPR64ExtUXTW64,    int_aarch64_sve_prfd_gather_sxtw_index, int_aarch64_sve_prfd_gather_uxtw_index>;
 
   // Gather prefetch using scaled 64-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.d, lsl #1]
-  defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8,  int_aarch64_sve_prfb_gather_scaled>;
-  defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_scaled>;
-  defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_scaled>;
-  defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_scaled>;
+  defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8,  int_aarch64_sve_prfb_gather_index>;
+  defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_index>;
+  defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_index>;
+  defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_index>;
 
   // Gather prefetch using 32/64-bit pointers with offset, e.g.
   //    prfh pldl1keep, p0, [z0.s, #16]
   //    prfh pldl1keep, p0, [z0.d, #16]
-  defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
-  defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
-  defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
-  defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
-
-  defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
-  defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
-  defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
-  defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
+  defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather_scalar_offset>;
+  defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather_scalar_offset>;
+  defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather_scalar_offset>;
+  defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather_scalar_offset>;
+
+  defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather_scalar_offset>;
+  defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather_scalar_offset>;
+  defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather_scalar_offset>;
+  defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather_scalar_offset>;
 
   defm ADR_SXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_sxtw<0b00, "adr">;
   defm ADR_UXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_uxtw<0b01, "adr">;

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
new file mode 100644
index 000000000000..87e22fcf73fc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
@@ -0,0 +1,203 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
+
+; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
+define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32:
+; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.s, uxtw]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32:
+; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.s, sxtw]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]    -> 32-bit unpacked indexes
+
+define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64:
+; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d, uxtw]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64:
+; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d, sxtw]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
+define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
+; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  ret void
+ }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
+define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32:
+; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.s, uxtw #1]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32:
+; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.s, sxtw #1]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked indexes
+define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64:
+; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, uxtw #1]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64:
+; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, sxtw #1]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
+define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
+; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, lsl #1]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  ret void
+ }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
+define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32:
+; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.s, uxtw #2]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32:
+; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.s, sxtw #2]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked indexes
+define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64:
+; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, uxtw #2]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64:
+; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, sxtw #2]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
+define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
+; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, lsl #2]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  ret void
+ }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
+define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32:
+; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.s, uxtw #3]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32:
+; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.s, sxtw #3]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked indexes
+define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64:
+; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, uxtw #3]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64:
+; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, sxtw #3]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  ret void
+ }
+
+; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
+define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
+; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, lsl #3]
+; CHECK-NEXT:  ret
+  call void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  ret void
+ }
+
+declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+
+declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+
+declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+
+declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll
deleted file mode 100644
index 2de5668d1b9e..000000000000
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll
+++ /dev/null
@@ -1,200 +0,0 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
-
-; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit          scaled offset
-define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32:
-; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.s, uxtw]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32:
-; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.s, sxtw]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]    -> 32-bit unpacked scaled offset
-
-define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64:
-; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d, uxtw]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64:
-; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d, sxtw]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D]           -> 64-bit          scaled offset
-define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
-; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.d]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
-  ret void
- }
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit          scaled offset
-define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32:
-; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.s, uxtw #1]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32:
-; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.s, sxtw #1]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64:
-; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, uxtw #1]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64:
-; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, sxtw #1]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D]           -> 64-bit          scaled offset
-define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
-; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.d, lsl #1]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
-  ret void
- }
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit          scaled offset
-define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32:
-; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.s, uxtw #2]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32:
-; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.s, sxtw #2]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64:
-; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, uxtw #2]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64:
-; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, sxtw #2]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D]           -> 64-bit          scaled offset
-define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
-; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.d, lsl #2]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
-  ret void
- }
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit          scaled offset
-define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32:
-; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.s, uxtw #3]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32:
-; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.s, sxtw #3]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64:
-; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, uxtw #3]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64:
-; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, sxtw #3]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
-  ret void
- }
-
-; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D]           -> 64-bit          scaled offset
-define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
-; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.d, lsl #3]
-; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
-  ret void
- }
-
-declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
index 8be10be55f27..6d745eba95b6 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
@@ -1,82 +1,82 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_prfb_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32:
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [z0.s, #7]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_prfb_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64:
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [z0.d, #7]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_prfh_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32:
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [z0.s, #6]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_prfh_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64:
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [z0.d, #6]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_prfw_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32:
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [z0.s, #12]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64:
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [z0.d, #12]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32:
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [z0.s, #16]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64:
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [z0.d, #16]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
   ret void
 }
 
-declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
index ca027edfd5de..c525cec6598a 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
@@ -1,286 +1,286 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 1, ..., 31
-define void @llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %offset, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_runtime_offset:
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [x0, z0.s, uxtw]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #32
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [x[[N]], z0.s, uxtw]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 1, ..., 31
-define void @llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_runtime_offset:
 ; CHECK-NEXT:   prfb pldl1strm, p0, [x0, z0.d, uxtw]
 ; CHECK-NEXT:   ret
-  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #32
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [x[[N]], z0.d, uxtw]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfb  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 2, ..., 62
-define void @llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %offset, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_runtime_offset:
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #63
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N]], z0.s, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 2, ..., 62
-define void @llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_runtime_offset:
 ; CHECK-NEXT:   prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:   ret
-  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #63
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N]], z0.d, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
+define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfh  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 4, ..., 124
-define void @llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %offset, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_runtime_offset:
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #125
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N]], z0.s, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 4, ..., 124
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_runtime_offset:
 ; CHECK-NEXT:   prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:   ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #125
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N]], z0.d, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfw  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 8, ..., 248
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %offset, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_runtime_offset:
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x0, z0.s, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #125
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N]], z0.s, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 8, ..., 248
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_runtime_offset:
 ; CHECK-NEXT:   prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:   ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #125
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N]], z0.d, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT:  mov   x[[N:[0-9]+]], #-1
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
 ; CHECK-NEXT:  mov   w[[N:[0-9]+]], #33
 ; CHECK-NEXT:  prfd  pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
 ; CHECK-NEXT:  ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
   ret void
 }
 
-declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
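
For readers skimming the new names, below is a minimal, self-contained IR sketch
(illustrative only, not part of this commit) that exercises one of the renamed
intrinsics with the same signature as the declarations above. The function name
and the offset value 16 are made up for the example; the prfop value 1 matches
the pldl1strm operation checked throughout these tests, and 16 sits inside the
immediate range implied by the upper-bound test above (which treats 32 as out of
range for prfb).

; Sketch only: vector of 64-bit base addresses, scalar byte offset of 16,
; prefetch operation pldl1strm (encoded as i32 1).
define void @sketch_prfb_gather_scalar_offset(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %pg) {
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64, i32)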
More information about the llvm-commits mailing list